repo_name stringlengths 6-112 | path stringlengths 4-204 | copies stringlengths 1-3 | size stringlengths 4-6 | content stringlengths 714-810k | license stringclasses 15 values |
---|---|---|---|---|---|
stylianos-kampakis/scikit-learn
|
examples/cluster/plot_ward_structured_vs_unstructured.py
|
320
|
3369
|
"""
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on its position.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it is a hierarchical clustering with a structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifold. In contrast, when connectivity constraints are imposed,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
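# Note: passing this sparse kNN graph as ``connectivity`` below means Ward
# linkage may only merge clusters that are neighbours in the graph, acting as
# a structure prior that keeps clusters from jumping across folds of the roll.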
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
|
bsd-3-clause
|
miqlar/PyFME
|
examples/example_006.py
|
2
|
3524
|
# -*- coding: utf-8 -*-
"""
Python Flight Mechanics Engine (PyFME).
Copyright (c) AeroPython Development Team.
Distributed under the terms of the MIT License.
Example
-------
Cessna 310, ISA1976 integrated with Flat Earth (euler angles).
Evolution of the aircraft after a longitudinal perturbation (delta doublet).
Trimmed in stationary, horizontal, symmetric, wings level flight.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pyfme.aircrafts import Cessna310
from pyfme.environment.environment import Environment
from pyfme.environment.atmosphere import ISA1976
from pyfme.environment.gravity import VerticalConstant
from pyfme.environment.wind import NoWind
from pyfme.models.systems import EulerFlatEarth
from pyfme.simulator import BatchSimulation
from pyfme.utils.trimmer import steady_state_flight_trimmer
aircraft = Cessna310()
atmosphere = ISA1976()
gravity = VerticalConstant()
wind = NoWind()
environment = Environment(atmosphere, gravity, wind)
# Initial conditions.
TAS = 312.5 * 0.3048 # m/s
h0 = 8000 * 0.3048 # m
psi0 = 1 # rad
x0, y0 = 0, 0 # m
turn_rate = 0.0 # rad/s
gamma0 = 0.00 # rad
system = EulerFlatEarth(lat=0, lon=0, h=h0, psi=psi0, x_earth=x0, y_earth=y0)
not_trimmed_controls = {'delta_elevator': 0.05,
'hor_tail_incidence': 0.00,
'delta_aileron': 0.01 * np.sign(turn_rate),
'delta_rudder': 0.01 * np.sign(turn_rate),
'delta_t': 0.5}
controls2trim = ['delta_elevator', 'delta_aileron', 'delta_rudder', 'delta_t']
trimmed_ac, trimmed_sys, trimmed_env, results = steady_state_flight_trimmer(
aircraft, system, environment, TAS=TAS, controls_0=not_trimmed_controls,
controls2trim=controls2trim, gamma=gamma0, turn_rate=turn_rate, verbose=2)
print(results)
my_simulation = BatchSimulation(trimmed_ac, trimmed_sys, trimmed_env)
tfin = 45 # seconds
N = tfin * 100 + 1
time = np.linspace(0, tfin, N)
initial_controls = trimmed_ac.controls
controls = {}
for control_name, control_value in initial_controls.items():
controls[control_name] = np.ones_like(time) * control_value
# Elevator doublet
controls['delta_elevator'][np.where(time<2)] = \
initial_controls['delta_elevator'] * 1.30
controls['delta_elevator'][np.where(time<1)] = \
initial_controls['delta_elevator'] * 0.70
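# Note: the two assignments above are order-dependent; after both, the
# elevator sits at 0.70x the trimmed value for t < 1 s, 1.30x for
# 1 <= t < 2 s, and back at the trimmed value for t >= 2 s, i.e. the
# longitudinal doublet described in the module docstring.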
my_simulation.set_controls(time, controls)
par_list = ['x_earth', 'y_earth', 'height',
'psi', 'theta', 'phi',
'u', 'v', 'w',
'v_north', 'v_east', 'v_down',
'p', 'q', 'r',
'alpha', 'beta', 'TAS',
'F_xb', 'F_yb', 'F_zb',
'M_xb', 'M_yb', 'M_zb']
my_simulation.set_par_dict(par_list)
my_simulation.run_simulation()
# print(my_simulation.par_dict)
plt.style.use('ggplot')
for ii in range(len(par_list) // 3):
three_params = par_list[3*ii:3*ii+3]
fig, ax = plt.subplots(3, 1, sharex=True)
for jj, par in enumerate(three_params):
ax[jj].plot(time, my_simulation.par_dict[par])
ax[jj].set_ylabel(par)
ax[jj].set_xlabel('time (s)')
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(my_simulation.par_dict['x_earth'],
my_simulation.par_dict['y_earth'],
my_simulation.par_dict['height'])
ax.plot(my_simulation.par_dict['x_earth'],
my_simulation.par_dict['y_earth'],
my_simulation.par_dict['height'] * 0)
ax.set_xlabel('x_earth')
ax.set_ylabel('y_earth')
ax.set_zlabel('z_earth')
plt.show()
|
mit
|
fengzhyuan/scikit-learn
|
examples/covariance/plot_sparse_cov.py
|
300
|
5078
|
"""
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data are not too correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. Moreover, with a small
number of observations, it is easier to recover a correlation matrix
than a covariance matrix, so we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, the small
number of samples forces us to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, which
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso, which sets the sparsity of the model,
is chosen by internal cross-validation in GraphLassoCV. As can be
seen in figure 2, the grid used to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
|
bsd-3-clause
|
imaculate/scikit-learn
|
examples/exercises/plot_cv_digits.py
|
135
|
1223
|
"""
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn import datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
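# Note: cross_val_score returns one accuracy per fold (k-fold with the
# library's default splitter, 3 folds in the scikit-learn versions this
# example targets); the plot below shows the mean score and a band of
# +/- one standard deviation across folds for each value of C.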
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
|
bsd-3-clause
|
henridwyer/scikit-learn
|
examples/linear_model/plot_ols_ridge_variance.py
|
387
|
2060
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Because of the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise in the observations causes
great variance, as shown in the first plot. The slope of every line
can vary quite a bit for each prediction due to the noise
in the observations.
Ridge regression is essentially the minimization of a penalised version
of the least-squares loss function. The penalty `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
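# Background sketch: Ridge minimizes  ||y - Xw||^2 + alpha * ||w||^2 , so the
# alpha-weighted penalty shrinks the coefficients w; this is why the fitted
# slopes below vary far less across noisy resamples than the OLS ones do.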
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
|
bsd-3-clause
|
ICTatRTI/researchnet
|
researchnet/dashboard/views.py
|
1
|
3301
|
import csv
from core.models import Submission, Participant
from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import HttpResponse,HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout, authenticate, login
from django.shortcuts import redirect
from django.core.mail import send_mail
from pandas.io.json import json_normalize
import pandas as pd
from django.contrib.auth import (
REDIRECT_FIELD_NAME, get_user_model, login as auth_login,
logout as auth_logout, update_session_auth_hash,
)
from core.forms import ResearchnetAuthForm
# Create your views here.
@login_required
def index(request):
return render(request, 'index.html')
@login_required
def enrollment(request):
participants = Participant.objects.order_by('-user__date_joined')[:100]
context = {'participant_list': participants}
return render(request, 'participant.html', context)
@login_required
def export_submissions(request):
submissions = Submission.objects.all()
df = pd.DataFrame.from_records(json_normalize(submissions.values('id', 'device_id', 'lat', 'long', 'place', 'response', 'time_complete', 'time_start', 'timestamp', 'user__username', 'user__participant__gender')))
# Create the HttpResponse object with the appropriate CSV header.
export = df.to_csv()
response = HttpResponse(export, content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="study_submissions.csv"'
return response
@login_required
def export_enrollees(request):
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="study_enrollees.csv"'
writer = csv.writer(response)
writer.writerow(['First Name', 'Last Name', 'Username', 'Email Address', 'Gender', 'DOB'])
participants = Participant.objects.all()
for participant in participants:
writer.writerow([participant.user.first_name, participant.user.last_name, participant.user.username, participant.user.email, participant.gender, participant.dob])
return response
def login_view(request, *args, **kwargs):
redirect_to = request.POST.get(REDIRECT_FIELD_NAME, request.GET.get(REDIRECT_FIELD_NAME, ''))
if request.method == "POST":
form = ResearchnetAuthForm(request, data=request.POST)
if form.is_valid():
user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])
if user is not None :
if user.has_perm('view_dashboard'):
login(request, user)
return HttpResponseRedirect(redirect_to)
else:
form.add_error(None, "Participant login not supported")
else:
print("not valid form")
form
else: # not a post
form = ResearchnetAuthForm(request)
context = {
'form': form,
REDIRECT_FIELD_NAME: redirect_to
}
return render(request, 'registration/login.html', context)
def logout_view(request):
logout(request)
return redirect('home')
|
gpl-3.0
|
EPFL-LCN/neuronaldynamics-exercises
|
neurodynex3/ojas_rule/oja.py
|
1
|
4820
|
"""
This file implements Oja's Hebbian learning rule.
Relevant book chapters:
- http://neuronaldynamics.epfl.ch/online/Ch19.S2.html#SS1.p6
"""
# This file is part of the exercise code repository accompanying
# the book: Neuronal Dynamics (see http://neuronaldynamics.epfl.ch)
# located at http://github.com/EPFL-LCN/neuronaldynamics-exercises.
# This free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License 2.0 as published by the
# Free Software Foundation. You should have received a copy of the
# GNU General Public License along with the repository. If not,
# see http://www.gnu.org/licenses/.
# Should you reuse and publish the code for your own purposes,
# please cite the book or point to the webpage http://neuronaldynamics.epfl.ch.
# Wulfram Gerstner, Werner M. Kistler, Richard Naud, and Liam Paninski.
# Neuronal Dynamics: From Single Neurons to Networks and Models of Cognition.
# Cambridge University Press, 2014.
import matplotlib.pyplot as plt
import numpy as np
def make_cloud(n=2000, ratio=1, angle=0):
"""Returns an oriented elliptic
gaussian cloud of 2D points
Args:
n (int, optional): number of points in the cloud
ratio (int, optional): (std along the short axis) /
(std along the long axis)
angle (int, optional): rotation angle [deg]
Returns:
numpy.ndarray: array of datapoints
"""
if ratio > 1.:
ratio = 1. / ratio
x = np.random.randn(n, 1)
y = ratio * np.random.randn(n, 1)
z = np.concatenate((x, y), 1)
radangle = (180. - angle) * np.pi / 180.
transfo = [
[np.cos(radangle), np.sin(radangle)],
[-np.sin(radangle), np.cos(radangle)]
]
return np.dot(transfo, z.T).T
def learn(cloud, initial_angle=None, eta=0.005):
"""Run one batch of Oja's learning over
a cloud of datapoints.
Args:
cloud (numpy.ndarray): An N by 2 array of datapoints. You can
think of each of the two columns as the time series of firing rates of one presynaptic neuron.
initial_angle (float, optional): angle of initial
set of weights [deg]. If None, this is random.
eta (float, optional): learning rate
Returns:
numpy.ndarray: time course of the weight vector
"""
# get angle if not set
if initial_angle is None:
initial_angle = np.random.rand() * 360.
radangle = initial_angle * np.pi / 180.
w = np.array([np.cos(radangle), np.sin(radangle)])
wcourse = np.zeros((len(cloud), 2), float)
for i in range(0, len(cloud)):
wcourse[i] = w
y = np.dot(w, cloud[i]) # output: postsynaptic firing rate of a linear neuron.
# Oja's rule (cloud[i] holds the two presynaptic firing rates at time point i)
w = w + eta * y * (cloud[i] - y * w)
return wcourse
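# Background sketch: the update in the loop above is Oja's rule,
#     w <- w + eta * y * (x - y * w)   with y = w . x,
# a Hebbian update with a decay term that keeps ||w|| close to 1; for small
# eta, w converges to the principal eigenvector of the input covariance,
# i.e. the first principal component direction of the data cloud.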
def plot_oja_trace(data_cloud, weights_course):
"""
Plots the datapoints and the time series of the weights
Args:
data_cloud (numpy.ndarray): n by 2 data
weights_course (numpy.ndarray): n by 2 weights
Returns:
"""
plt.scatter(
data_cloud[:, 0],
data_cloud[:, 1],
marker=".",
facecolor="none",
edgecolor="#222222",
alpha=.2
)
plt.xlabel("x1")
plt.ylabel("x2")
# color time and plot with colorbar
time = np.arange(len(weights_course))
colors = plt.cm.cool(time / float(len(time)))
sm = plt.cm.ScalarMappable(
cmap=plt.cm.cool,
norm=plt.Normalize(vmin=0, vmax=len(data_cloud))
)
sm.set_array(time)
cb = plt.colorbar(sm)
cb.set_label("Iteration")
plt.scatter(
weights_course[:, 0],
weights_course[:, 1],
facecolor=colors,
edgecolor="none",
lw=2
)
# ensure rectangular plot
x_min = data_cloud[:, 0].min()
x_max = data_cloud[:, 0].max()
y_min = data_cloud[:, 1].min()
y_max = data_cloud[:, 1].max()
lims = [min(x_min, y_min), max(x_max, y_max)]
plt.xlim(lims)
plt.ylim(lims)
plt.show()
def run_oja(n=2000, ratio=1., angle=0., learning_rate=0.01, do_plot=True):
"""Generates a point cloud and runs Oja's learning
rule once. Optionally plots the result.
Args:
n (int, optional): number of points in the cloud
ratio (float, optional): (std along the short axis) /
(std along the long axis)
angle (float, optional): rotation angle [deg]
do_plot (bool, optional): plot the result
"""
cloud = make_cloud(n=n, ratio=ratio, angle=angle)
wcourse = learn(cloud, eta=learning_rate)
if do_plot:
plot_oja_trace(cloud, wcourse)
return wcourse, cloud
if __name__ == "__main__":
run_oja(n=2000, ratio=1.1, angle=30, learning_rate=0.2)
|
gpl-2.0
|
chaluemwut/fbserver
|
venv/lib/python2.7/site-packages/sklearn/decomposition/tests/test_factor_analysis.py
|
20
|
3128
|
# Author: Christian Osendorfer <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
"""Test FactorAnalysis ability to recover the data covariance structure
"""
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
# latent variable of dim 3, 20 of it
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
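# Background sketch: this is the factor-analysis generative model
#     x = h @ W + noise,   h ~ N(0, I),  noise independent per feature,
# so the implied model covariance is W.T @ W + diag(psi); get_covariance()
# below is compared against the sample covariance under this assumption.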
assert_raises(ValueError, FactorAnalysis, svd_method='foo')
fa_fail = FactorAnalysis()
fa_fail.svd_method = 'foo'
assert_raises(ValueError, fa_fail.fit, X)
fas = []
for method in ['randomized', 'lapack']:
fa = FactorAnalysis(n_components=n_components, svd_method=method)
fa.fit(X)
fas.append(fa)
X_t = fa.transform(X)
assert_equal(X_t.shape, (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
diff = np.all(np.diff(fa.loglike_))
assert_greater(diff, 0., 'Log likelihood did not increase')
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal
fa1, fa2 = fas
for attr in ['loglike_', 'components_', 'noise_variance_']:
assert_almost_equal(f(fa1, attr), f(fa2, attr))
fa1.max_iter = 1
fa1.verbose = True
assert_warns(ConvergenceWarning, fa1.fit, X)
assert_warns(DeprecationWarning, FactorAnalysis, verbose=1)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
fa.n_components = n_components
fa.fit(X)
cov = fa.get_covariance()
precision = fa.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
|
apache-2.0
|
SuperSaiyanSSS/SinaWeiboSpider
|
ml/svm_utils.py
|
1
|
2502
|
# coding=utf-8
from __future__ import print_function
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import re
import jieba
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn import svm
path_doc_root = 'H:\py\workplace\/a2\SogouC.reduced2\\Reduced' # root directory, i.e. where the pre-classified text corpus is stored
path_tmp = 'H:\py\workplace\/a2\SogouC.reduced2ss11\\temp1' # location for intermediate results
path_dictionary = os.path.join(path_tmp, 'THUNews.dict')
path_tmp_tfidf = os.path.join(path_tmp, 'tfidf_corpus')
path_tmp_lsi = os.path.join(path_tmp, 'lsi_corpus')
path_tmp_lsimodel = os.path.join(path_tmp, 'lsi_model.pkl')
path_tmp_predictor = os.path.join(path_tmp, 'predictor.pkl')
def convert_doc_to_wordlist(str_doc,cut_all):
sent_list = str_doc.split('\n')
sent_list = map(rm_char, sent_list) # strip some characters, e.g. \u3000
word_2dlist = [rm_tokens(jieba.cut(part,cut_all=cut_all)) for part in sent_list] # word segmentation
word_list = sum(word_2dlist,[])
return word_list
def rm_tokens(words): # remove some stop words and digits
words_list = list(words)
stop_words = get_stop_words()
for i in range(words_list.__len__())[::-1]:
if words_list[i] in stop_words: # remove stop words
words_list.pop(i)
elif words_list[i].isdigit():
words_list.pop(i)
return words_list
def get_stop_words(path='stopwords_cn.txt'):
file = open(path,'rb').read().split('\n')
return set(file)
def rm_char(text):
text = re.sub('\u3000','',text)
return text
def svm_classify(train_set, train_tag, test_set, test_tag):
clf = svm.LinearSVC()
clf_res = clf.fit(train_set, train_tag)
train_pred = clf_res.predict(train_set)
test_pred = clf_res.predict(test_set)
train_err_num, train_err_ratio = checkPred(train_tag, train_pred)
test_err_num, test_err_ratio = checkPred(test_tag, test_pred)
print('=== Classification training finished; results below ===')
print('Training set error: {e}'.format(e=train_err_ratio))
print('Test set error: {e}'.format(e=test_err_ratio))
return clf_res
def checkPred(data_tag, data_pred):
if data_tag.__len__() != data_pred.__len__():
raise RuntimeError('The length of data tag and data pred should be the same')
err_count = 0
for i in range(data_tag.__len__()):
if data_tag[i]!=data_pred[i]:
err_count += 1
err_ratio = err_count / data_tag.__len__()
return [err_count, err_ratio]
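# Note: under Python 2 (this module relies on ``reload(sys)``) the expression
# ``err_count / data_tag.__len__()`` is integer division, so the error ratio
# rounds down to 0 unless every prediction is wrong; use
# ``float(err_count) / len(data_tag)`` if an exact ratio is wanted.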
|
mit
|
elenita1221/BDA_py_demos
|
demos_ch2/demo2_3.py
|
19
|
1931
|
"""Bayesian Data Analysis, 3rd ed
Chapter 2, demo 3
Simulate samples from Beta(438,544), draw a histogram with quantiles, and do
the same for a transformed variable.
"""
import numpy as np
from scipy.stats import beta
import matplotlib.pyplot as plt
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
# Plotting grid
x = np.linspace(0.36, 0.54, 150)
# Draw n random samples from Beta(438,544)
n = 10000
th = beta.rvs(438, 544, size=n) # rvs comes from `random variates`
# Plot 2 subplots
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(8, 10))
# Plot histogram
axes[0].hist(th, bins=30)
# Compute 2.5% and 97.5% quantile approximation using samples
th25, th975 = np.percentile(th, [2.5, 97.5])
# Draw lines for these
axes[0].axvline(th25, color='#e41a1c', linewidth=1.5)
axes[0].axvline(th975, color='#e41a1c', linewidth=1.5)
axes[0].text(th25, axes[0].get_ylim()[1]+15, '2.5%',
horizontalalignment='center')
axes[0].text(th975, axes[0].get_ylim()[1]+15, '97.5%',
horizontalalignment='center')
axes[0].set_xlabel(r'$\theta$', fontsize=18)
axes[0].set_yticks(())
# Plot histogram for the transformed variable
phi = (1-th)/th
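# Note: phi = (1 - theta) / theta is a monotone (decreasing) transform of
# theta, so its 2.5% / 97.5% quantiles could equally be obtained by
# transforming the theta quantiles and swapping their roles; here they are
# simply recomputed from the transformed samples.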
axes[1].hist(phi, bins=30)
# Compute 2.5% and 97.5% quantile approximation using samples
phi25, phi975 = np.percentile(phi, [2.5, 97.5])
# Draw lines for these
axes[1].axvline(phi25, color='#e41a1c', linewidth=1.5)
axes[1].axvline(phi975, color='#e41a1c', linewidth=1.5)
axes[1].text(phi25, axes[1].get_ylim()[1]+15, '2.5%',
horizontalalignment='center')
axes[1].text(phi975, axes[1].get_ylim()[1]+15, '97.5%',
horizontalalignment='center')
axes[1].set_xlabel(r'$\phi$', fontsize=18)
axes[1].set_yticks(())
# Display the figure
plt.show()
|
gpl-3.0
|
kdebrab/pandas
|
pandas/tests/groupby/aggregate/test_aggregate.py
|
4
|
9532
|
# -*- coding: utf-8 -*-
"""
test .agg behavior / note that .apply is tested generally in test_groupby.py
"""
import pytest
import numpy as np
import pandas as pd
from pandas import concat, DataFrame, Index, MultiIndex, Series
from pandas.core.groupby.grouper import Grouping
from pandas.core.base import SpecificationError
from pandas.compat import OrderedDict
import pandas.util.testing as tm
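# Rough sketch of the API exercised by these tests (not itself a test): for
# df = DataFrame({'A': ['x', 'x', 'y'], 'B': [1., 2., 3.]}),
#   df.groupby('A').agg('mean')                 # one reducer for every column
#   df.groupby('A')['B'].agg(['mean', 'std'])   # several reducers at once
#   df.groupby('A').agg({'B': ['mean', 'std']}) # dict maps columns to reducers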
def test_agg_regression1(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_agg_must_agg(df):
grouped = df.groupby('A')['C']
msg = "Must produce aggregated value"
with tm.assert_raises_regex(Exception, msg):
grouped.agg(lambda x: x.describe())
with tm.assert_raises_regex(Exception, msg):
grouped.agg(lambda x: x.index[:2])
def test_agg_ser_multi_key(df):
# TODO(wesm): unused
ser = df.C # noqa
f = lambda x: x.sum()
results = df.C.groupby([df.A, df.B]).aggregate(f)
expected = df.groupby(['A', 'B']).sum()['C']
tm.assert_series_equal(results, expected)
def test_groupby_aggregation_mixed_dtype():
# GH 6212
expected = DataFrame({
'v1': [5, 5, 7, np.nan, 3, 3, 4, 1],
'v2': [55, 55, 77, np.nan, 33, 33, 44, 11]},
index=MultiIndex.from_tuples([(1, 95), (1, 99), (2, 95), (2, 99),
('big', 'damp'),
('blue', 'dry'),
('red', 'red'), ('red', 'wet')],
names=['by1', 'by2']))
df = DataFrame({
'v1': [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9],
'v2': [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99],
'by1': ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan,
12],
'by2': ["wet", "dry", 99, 95, np.nan, "damp", 95, 99, "red", 99,
np.nan, np.nan]
})
g = df.groupby(['by1', 'by2'])
result = g[['v1', 'v2']].mean()
tm.assert_frame_equal(result, expected)
def test_agg_apply_corner(ts, tsframe):
# nothing to group, all NA
grouped = ts.groupby(ts * np.nan)
assert ts.dtype == np.float64
# groupby float64 values results in Float64Index
exp = Series([], dtype=np.float64,
index=pd.Index([], dtype=np.float64))
tm.assert_series_equal(grouped.sum(), exp)
tm.assert_series_equal(grouped.agg(np.sum), exp)
tm.assert_series_equal(grouped.apply(np.sum), exp,
check_index_type=False)
# DataFrame
grouped = tsframe.groupby(tsframe['A'] * np.nan)
exp_df = DataFrame(columns=tsframe.columns, dtype=float,
index=pd.Index([], dtype=np.float64))
tm.assert_frame_equal(grouped.sum(), exp_df, check_names=False)
tm.assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False)
tm.assert_frame_equal(grouped.apply(np.sum), exp_df.iloc[:, :0],
check_names=False)
def test_agg_grouping_is_list_tuple(ts):
df = tm.makeTimeDataFrame()
grouped = df.groupby(lambda x: x.year)
grouper = grouped.grouper.groupings[0].grouper
grouped.grouper.groupings[0] = Grouping(ts.index, list(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped.grouper.groupings[0] = Grouping(ts.index, tuple(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_agg_python_multiindex(mframe):
grouped = mframe.groupby(['A', 'B'])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('groupbyfunc', [
lambda x: x.weekday(),
[lambda x: x.month, lambda x: x.weekday()],
])
def test_aggregate_str_func(tsframe, groupbyfunc):
grouped = tsframe.groupby(groupbyfunc)
# single series
result = grouped['A'].agg('std')
expected = grouped['A'].std()
tm.assert_series_equal(result, expected)
# group frame by function name
result = grouped.aggregate('var')
expected = grouped.var()
tm.assert_frame_equal(result, expected)
# group frame by function dict
result = grouped.agg(OrderedDict([['A', 'var'],
['B', 'std'],
['C', 'mean'],
['D', 'sem']]))
expected = DataFrame(OrderedDict([['A', grouped['A'].var()],
['B', grouped['B'].std()],
['C', grouped['C'].mean()],
['D', grouped['D'].sem()]]))
tm.assert_frame_equal(result, expected)
def test_aggregate_item_by_item(df):
grouped = df.groupby('A')
aggfun = lambda ser: ser.size
result = grouped.agg(aggfun)
foo = (df.A == 'foo').sum()
bar = (df.A == 'bar').sum()
K = len(result.columns)
# GH5782
# odd comparisons can result here, so cast to make easy
exp = pd.Series(np.array([foo] * K), index=list('BCD'),
dtype=np.float64, name='foo')
tm.assert_series_equal(result.xs('foo'), exp)
exp = pd.Series(np.array([bar] * K), index=list('BCD'),
dtype=np.float64, name='bar')
tm.assert_almost_equal(result.xs('bar'), exp)
def aggfun(ser):
return ser.size
result = DataFrame().groupby(df.A).agg(aggfun)
assert isinstance(result, DataFrame)
assert len(result) == 0
def test_wrap_agg_out(three_group):
grouped = three_group.groupby(['A', 'B'])
def func(ser):
if ser.dtype == np.object:
raise TypeError
else:
return ser.sum()
result = grouped.aggregate(func)
exp_grouped = three_group.loc[:, three_group.columns != 'C']
expected = exp_grouped.groupby(['A', 'B']).aggregate(func)
tm.assert_frame_equal(result, expected)
def test_agg_multiple_functions_maintain_order(df):
# GH #610
funcs = [('mean', np.mean), ('max', np.max), ('min', np.min)]
result = df.groupby('A')['C'].agg(funcs)
exp_cols = Index(['mean', 'max', 'min'])
tm.assert_index_equal(result.columns, exp_cols)
def test_multiple_functions_tuples_and_non_tuples(df):
# #1359
funcs = [('foo', 'mean'), 'std']
ex_funcs = [('foo', 'mean'), ('std', 'std')]
result = df.groupby('A')['C'].agg(funcs)
expected = df.groupby('A')['C'].agg(ex_funcs)
tm.assert_frame_equal(result, expected)
result = df.groupby('A').agg(funcs)
expected = df.groupby('A').agg(ex_funcs)
tm.assert_frame_equal(result, expected)
def test_agg_multiple_functions_too_many_lambdas(df):
grouped = df.groupby('A')
funcs = ['mean', lambda x: x.mean(), lambda x: x.std()]
msg = 'Function names must be unique, found multiple named <lambda>'
with tm.assert_raises_regex(SpecificationError, msg):
grouped.agg(funcs)
def test_more_flexible_frame_multi_function(df):
grouped = df.groupby('A')
exmean = grouped.agg(OrderedDict([['C', np.mean], ['D', np.mean]]))
exstd = grouped.agg(OrderedDict([['C', np.std], ['D', np.std]]))
expected = concat([exmean, exstd], keys=['mean', 'std'], axis=1)
expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1)
d = OrderedDict([['C', [np.mean, np.std]], ['D', [np.mean, np.std]]])
result = grouped.aggregate(d)
tm.assert_frame_equal(result, expected)
# be careful
result = grouped.aggregate(OrderedDict([['C', np.mean],
['D', [np.mean, np.std]]]))
expected = grouped.aggregate(OrderedDict([['C', np.mean],
['D', [np.mean, np.std]]]))
tm.assert_frame_equal(result, expected)
def foo(x):
return np.mean(x)
def bar(x):
return np.std(x, ddof=1)
# this uses column selection & renaming
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
d = OrderedDict([['C', np.mean],
['D', OrderedDict([['foo', np.mean],
['bar', np.std]])]])
result = grouped.aggregate(d)
d = OrderedDict([['C', [np.mean]], ['D', [foo, bar]]])
expected = grouped.aggregate(d)
tm.assert_frame_equal(result, expected)
def test_multi_function_flexible_mix(df):
# GH #1268
grouped = df.groupby('A')
# Expected
d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])],
['D', {'sum': 'sum'}]])
# this uses column selection & renaming
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
expected = grouped.aggregate(d)
# Test 1
d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])],
['D', 'sum']])
# this uses column selection & renaming
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = grouped.aggregate(d)
tm.assert_frame_equal(result, expected)
# Test 2
d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])],
['D', ['sum']]])
# this uses column selection & renaming
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = grouped.aggregate(d)
tm.assert_frame_equal(result, expected)
|
bsd-3-clause
|
toolforger/sympy
|
sympy/physics/quantum/state.py
|
58
|
29186
|
"""Dirac notation for states."""
from __future__ import print_function, division
from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,
Tuple)
from sympy.core.compatibility import u, range
from sympy.printing.pretty.stringpict import stringPict
from sympy.physics.quantum.qexpr import QExpr, dispatch_method
__all__ = [
'KetBase',
'BraBase',
'StateBase',
'State',
'Ket',
'Bra',
'TimeDepState',
'TimeDepBra',
'TimeDepKet',
'Wavefunction'
]
#-----------------------------------------------------------------------------
# States, bras and kets.
#-----------------------------------------------------------------------------
# ASCII brackets
_lbracket = "<"
_rbracket = ">"
_straight_bracket = "|"
# Unicode brackets
# MATHEMATICAL ANGLE BRACKETS
_lbracket_ucode = u("\N{MATHEMATICAL LEFT ANGLE BRACKET}")
_rbracket_ucode = u("\N{MATHEMATICAL RIGHT ANGLE BRACKET}")
# LIGHT VERTICAL BAR
_straight_bracket_ucode = u("\N{LIGHT VERTICAL BAR}")
# Other options for unicode printing of <, > and | for Dirac notation.
# LEFT-POINTING ANGLE BRACKET
# _lbracket = u"\u2329"
# _rbracket = u"\u232A"
# LEFT ANGLE BRACKET
# _lbracket = u"\u3008"
# _rbracket = u"\u3009"
# VERTICAL LINE
# _straight_bracket = u"\u007C"
class StateBase(QExpr):
"""Abstract base class for general abstract states in quantum mechanics.
All other state classes defined will need to inherit from this class. It
carries the basic structure for all other states such as dual, _eval_adjoint
and label.
This is an abstract base class and you should not instantiate it directly,
instead use State.
"""
@classmethod
def _operators_to_state(self, ops, **options):
""" Returns the eigenstate instance for the passed operators.
This method should be overridden in subclasses. It will handle being
passed either an Operator instance or set of Operator instances. It
should return the corresponding state INSTANCE or simply raise a
NotImplementedError. See cartesian.py for an example.
"""
raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
def _state_to_operators(self, op_classes, **options):
""" Returns the operators which this state instance is an eigenstate
of.
This method should be overridden in subclasses. It will be called on
state instances and be passed the operator classes that we wish to make
into instances. The state instance will then transform the classes
appropriately, or raise a NotImplementedError if it cannot return
operator instances. See cartesian.py for examples.
"""
raise NotImplementedError(
"Cannot map this state to operators. Method not implemented!")
@property
def operators(self):
"""Return the operator(s) that this state is an eigenstate of"""
from .operatorset import state_to_operators # import internally to avoid circular import errors
return state_to_operators(self)
def _enumerate_state(self, num_states, **options):
raise NotImplementedError("Cannot enumerate this state!")
def _represent_default_basis(self, **options):
return self._represent(basis=self.operators)
#-------------------------------------------------------------------------
# Dagger/dual
#-------------------------------------------------------------------------
@property
def dual(self):
"""Return the dual state of this one."""
return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
@classmethod
def dual_class(self):
"""Return the class used to construt the dual."""
raise NotImplementedError(
'dual_class must be implemented in a subclass'
)
def _eval_adjoint(self):
"""Compute the dagger of this state using the dual."""
return self.dual
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _pretty_brackets(self, height, use_unicode=True):
# Return pretty printed brackets for the state
# Ideally, this could be done by pform.parens but it does not support the angled < and >
# Setup for unicode vs ascii
if use_unicode:
lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode
slash, bslash, vert = u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT}'), \
u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT}'), \
u('\N{BOX DRAWINGS LIGHT VERTICAL}')
else:
lbracket, rbracket = self.lbracket, self.rbracket
slash, bslash, vert = '/', '\\', '|'
# If height is 1, just return brackets
if height == 1:
return stringPict(lbracket), stringPict(rbracket)
# Make height even
height += (height % 2)
brackets = []
for bracket in lbracket, rbracket:
# Create left bracket
if bracket in set([_lbracket, _lbracket_ucode]):
bracket_args = [ ' ' * (height//2 - i - 1) +
slash for i in range(height // 2)]
bracket_args.extend(
[ ' ' * i + bslash for i in range(height // 2)])
# Create right bracket
elif bracket in set([_rbracket, _rbracket_ucode]):
bracket_args = [ ' ' * i + bslash for i in range(height // 2)]
bracket_args.extend([ ' ' * (
height//2 - i - 1) + slash for i in range(height // 2)])
# Create straight bracket
elif bracket in set([_straight_bracket, _straight_bracket_ucode]):
bracket_args = [vert for i in range(height)]
else:
raise ValueError(bracket)
brackets.append(
stringPict('\n'.join(bracket_args), baseline=height//2))
return brackets
def _sympystr(self, printer, *args):
contents = self._print_contents(printer, *args)
return '%s%s%s' % (self.lbracket, contents, self.rbracket)
def _pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
# Get brackets
pform = self._print_contents_pretty(printer, *args)
lbracket, rbracket = self._pretty_brackets(
pform.height(), printer._use_unicode)
# Put together state
pform = prettyForm(*pform.left(lbracket))
pform = prettyForm(*pform.right(rbracket))
return pform
def _latex(self, printer, *args):
contents = self._print_contents_latex(printer, *args)
# The extra {} brackets are needed to get matplotlib's latex
# renderer to render this properly.
return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex)
class KetBase(StateBase):
"""Base class for Kets.
This class defines the dual property and the brackets for printing. This is
an abstract base class and you should not instantiate it directly, instead
use Ket.
"""
lbracket = _straight_bracket
rbracket = _rbracket
lbracket_ucode = _straight_bracket_ucode
rbracket_ucode = _rbracket_ucode
lbracket_latex = r'\left|'
rbracket_latex = r'\right\rangle '
@classmethod
def default_args(self):
return ("psi",)
@classmethod
def dual_class(self):
return BraBase
def __mul__(self, other):
"""KetBase*other"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, BraBase):
return OuterProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*KetBase"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, BraBase):
return InnerProduct(other, self)
else:
return Expr.__rmul__(self, other)
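# Note: together with BraBase.__mul__/__rmul__ below, these overloads give the
# usual Dirac algebra: Bra('a')*Ket('b') becomes an InnerProduct (<a|b>) and
# Ket('b')*Bra('a') becomes an OuterProduct (|b><a|).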
#-------------------------------------------------------------------------
# _eval_* methods
#-------------------------------------------------------------------------
def _eval_innerproduct(self, bra, **hints):
"""Evaluate the inner product betweeen this ket and a bra.
This is called to compute <bra|ket>, where the ket is ``self``.
This method will dispatch to sub-methods having the format::
``def _eval_innerproduct_BraClass(self, **hints):``
Subclasses should define these methods (one for each BraClass) to
teach the ket how to take inner products with bras.
"""
return dispatch_method(self, '_eval_innerproduct', bra, **hints)
def _apply_operator(self, op, **options):
"""Apply an Operator to this Ket.
This method will dispatch to methods having the format::
``def _apply_operator_OperatorName(op, **options):``
Subclasses should define these methods (one for each OperatorName) to
teach the Ket how operators act on it.
Parameters
==========
op : Operator
The Operator that is acting on the Ket.
options : dict
A dict of key/value pairs that control how the operator is applied
to the Ket.
"""
return dispatch_method(self, '_apply_operator', op, **options)
class BraBase(StateBase):
"""Base class for Bras.
This class defines the dual property and the brackets for printing. This
is an abstract base class and you should not instantiate it directly,
instead use Bra.
"""
lbracket = _lbracket
rbracket = _straight_bracket
lbracket_ucode = _lbracket_ucode
rbracket_ucode = _straight_bracket_ucode
lbracket_latex = r'\left\langle '
rbracket_latex = r'\right|'
@classmethod
def _operators_to_state(self, ops, **options):
state = self.dual_class().operators_to_state(ops, **options)
return state.dual
def _state_to_operators(self, op_classes, **options):
return self.dual._state_to_operators(op_classes, **options)
def _enumerate_state(self, num_states, **options):
dual_states = self.dual._enumerate_state(num_states, **options)
return [x.dual for x in dual_states]
@classmethod
def default_args(self):
return self.dual_class().default_args()
@classmethod
def dual_class(self):
return KetBase
def __mul__(self, other):
"""BraBase*other"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, KetBase):
return InnerProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*BraBase"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, KetBase):
return OuterProduct(other, self)
else:
return Expr.__rmul__(self, other)
def _represent(self, **options):
"""A default represent that uses the Ket's version."""
from sympy.physics.quantum.dagger import Dagger
return Dagger(self.dual._represent(**options))
class State(StateBase):
"""General abstract quantum state used as a base class for Ket and Bra."""
pass
class Ket(State, KetBase):
"""A general time-independent Ket in quantum mechanics.
Inherits from State and KetBase. This class should be used as the base
class for all physical, time-independent Kets in a system. This class
and its subclasses will be the main classes that users will use for
expressing Kets in Dirac notation [1]_.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
Create a simple Ket and look at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> k = Ket('psi')
>>> k
|psi>
>>> k.hilbert_space
H
>>> k.is_commutative
False
>>> k.label
(psi,)
Kets know about their associated bra::
>>> k.dual
<psi|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.Bra'>
Take a linear combination of two kets::
>>> k0 = Ket(0)
>>> k1 = Ket(1)
>>> 2*I*k0 - 4*k1
2*I*|0> - 4*|1>
Compound labels are passed as tuples::
>>> n, m = symbols('n,m')
>>> k = Ket(n,m)
>>> k
|nm>
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Bra
class Bra(State, BraBase):
"""A general time-independent Bra in quantum mechanics.
Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This
class and its subclasses will be the main classes that users will use for
expressing Bras in Dirac notation.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
Create a simple Bra and look at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> b = Bra('psi')
>>> b
<psi|
>>> b.hilbert_space
H
>>> b.is_commutative
False
Bras know about their dual Kets::
>>> b.dual
|psi>
>>> b.dual_class()
<class 'sympy.physics.quantum.state.Ket'>
Like Kets, Bras can have compound labels and be manipulated in a similar
manner::
>>> n, m = symbols('n,m')
>>> b = Bra(n,m) - I*Bra(m,n)
>>> b
-I*<mn| + <nm|
Symbols in a Bra can be substituted using ``.subs``::
>>> b.subs(n,m)
<mm| - I*<mm|
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Ket
#-----------------------------------------------------------------------------
# Time dependent states, bras and kets.
#-----------------------------------------------------------------------------
class TimeDepState(StateBase):
"""Base class for a general time-dependent quantum state.
This class is used as a base class for any time-dependent state. The main
difference between this class and the time-independent state is that this
class takes a second argument that is the time in addition to the usual
label argument.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
"""
#-------------------------------------------------------------------------
# Initialization
#-------------------------------------------------------------------------
@classmethod
def default_args(self):
return ("psi", "t")
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def label(self):
"""The label of the state."""
return self.args[:-1]
@property
def time(self):
"""The time of the state."""
return self.args[-1]
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _print_time(self, printer, *args):
return printer._print(self.time, *args)
_print_time_repr = _print_time
_print_time_latex = _print_time
def _print_time_pretty(self, printer, *args):
pform = printer._print(self.time, *args)
return pform
def _print_contents(self, printer, *args):
label = self._print_label(printer, *args)
time = self._print_time(printer, *args)
return '%s;%s' % (label, time)
def _print_label_repr(self, printer, *args):
label = self._print_sequence(self.label, ',', printer, *args)
time = self._print_time_repr(printer, *args)
return '%s,%s' % (label, time)
def _print_contents_pretty(self, printer, *args):
label = self._print_label_pretty(printer, *args)
time = self._print_time_pretty(printer, *args)
return printer._print_seq((label, time), delimiter=';')
def _print_contents_latex(self, printer, *args):
label = self._print_sequence(
self.label, self._label_separator, printer, *args)
time = self._print_time_latex(printer, *args)
return '%s;%s' % (label, time)
class TimeDepKet(TimeDepState, KetBase):
"""General time-dependent Ket in quantum mechanics.
This inherits from ``TimeDepState`` and ``KetBase`` and is the main class
that should be used for Kets that vary with time. Its dual is a
``TimeDepBra``.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
Create a TimeDepKet and look at its attributes::
>>> from sympy.physics.quantum import TimeDepKet
>>> k = TimeDepKet('psi', 't')
>>> k
|psi;t>
>>> k.time
t
>>> k.label
(psi,)
>>> k.hilbert_space
H
TimeDepKets know about their dual bra::
>>> k.dual
<psi;t|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.TimeDepBra'>
"""
@classmethod
def dual_class(self):
return TimeDepBra
class TimeDepBra(TimeDepState, BraBase):
"""General time-dependent Bra in quantum mechanics.
This inherits from TimeDepState and BraBase and is the main class that
should be used for Bras that vary with time. Its dual is a TimeDepKet.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
>>> from sympy.physics.quantum import TimeDepBra
>>> from sympy import symbols, I
>>> b = TimeDepBra('psi', 't')
>>> b
<psi;t|
>>> b.time
t
>>> b.label
(psi,)
>>> b.hilbert_space
H
>>> b.dual
|psi;t>
"""
@classmethod
def dual_class(self):
return TimeDepKet
class Wavefunction(Function):
"""Class for representations in continuous bases
This class takes an expression and coordinates in its constructor. It can
be used to easily calculate normalizations and probabilities.
Parameters
==========
expr : Expr
The expression representing the functional form of the w.f.
coords : Symbol or tuple
The coordinates to be integrated over, and their bounds
Examples
========
Particle in a box, specifying bounds in the more primitive way of using
Piecewise:
>>> from sympy import Symbol, Piecewise, pi, N
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = Symbol('x', real=True)
>>> n = 1
>>> L = 1
>>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True))
>>> f = Wavefunction(g, x)
>>> f.norm
1
>>> f.is_normalized
True
>>> p = f.prob()
>>> p(0)
0
>>> p(L)
0
>>> p(0.5)
2
>>> p(0.85*L)
2*sin(0.85*pi)**2
>>> N(p(0.85*L))
0.412214747707527
Additionally, you can specify the bounds of the function and the indices in
a more compact way:
>>> from sympy import symbols, pi, diff
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> f(L+1)
0
>>> f(L-1)
sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L)
>>> f(-1)
0
>>> f(0.85)
sqrt(2)*sin(0.85*pi*n/L)/sqrt(L)
>>> f(0.85, n=1, L=1)
sqrt(2)*sin(0.85*pi)
>>> f.is_commutative
False
All arguments are automatically sympified, so you can define the variables
as strings rather than symbols:
>>> expr = x**2
>>> f = Wavefunction(expr, 'x')
>>> type(f.variables[0])
<class 'sympy.core.symbol.Symbol'>
Derivatives of Wavefunctions will return Wavefunctions:
>>> diff(f, x)
Wavefunction(2*x, x)
"""
#Any passed tuples for coordinates and their bounds need to be
#converted to Tuples before Function's constructor is called, to
#avoid errors from calling is_Float in the constructor
def __new__(cls, *args, **options):
new_args = [None for i in args]
ct = 0
for arg in args:
if isinstance(arg, tuple):
new_args[ct] = Tuple(*arg)
else:
new_args[ct] = arg
ct += 1
return super(Function, cls).__new__(cls, *new_args, **options)
def __call__(self, *args, **options):
var = self.variables
if len(args) != len(var):
raise NotImplementedError(
"Incorrect number of arguments to function!")
ct = 0
#If the passed value is outside the specified bounds, return 0
for v in var:
lower, upper = self.limits[v]
#Do the comparison to limits only if the passed symbol is actually
#a symbol present in the limits;
#Had problems with a comparison of x > L
if isinstance(args[ct], Expr) and \
not (lower in args[ct].free_symbols
or upper in args[ct].free_symbols):
continue
if (args[ct] < lower) == True or (args[ct] > upper) == True:
return 0
ct += 1
expr = self.expr
#Allows user to make a call like f(2, 4, m=1, n=1)
for symbol in list(expr.free_symbols):
if str(symbol) in options.keys():
val = options[str(symbol)]
expr = expr.subs(symbol, val)
return expr.subs(zip(var, args))
def _eval_derivative(self, symbol):
expr = self.expr
deriv = expr._eval_derivative(symbol)
return Wavefunction(deriv, *self.args[1:])
def _eval_conjugate(self):
return Wavefunction(conjugate(self.expr), *self.args[1:])
def _eval_transpose(self):
return self
@property
def free_symbols(self):
return self.expr.free_symbols
@property
def is_commutative(self):
"""
Override Function's is_commutative so that order is preserved in
represented expressions
"""
return False
@classmethod
def eval(self, *args):
return None
@property
def variables(self):
"""
Return the coordinates which the wavefunction depends on
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x,y = symbols('x,y')
>>> f = Wavefunction(x*y, x, y)
>>> f.variables
(x, y)
>>> g = Wavefunction(x*y, x)
>>> g.variables
(x,)
"""
var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]]
return tuple(var)
@property
def limits(self):
"""
Return the limits of the coordinates which the w.f. depends on. If no
limits are specified, defaults to ``(-oo, oo)``.
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, (x, 0, 1))
>>> f.limits
{x: (0, 1)}
>>> f = Wavefunction(x**2, x)
>>> f.limits
{x: (-oo, oo)}
>>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2))
>>> f.limits
{x: (-oo, oo), y: (-1, 2)}
"""
limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo)
for g in self._args[1:]]
return dict(zip(self.variables, tuple(limits)))
@property
def expr(self):
"""
Return the expression which is the functional form of the Wavefunction
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, x)
>>> f.expr
x**2
"""
return self._args[0]
@property
def is_normalized(self):
"""
Returns true if the Wavefunction is properly normalized
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.is_normalized
True
"""
return (self.norm == 1.0)
@property
@cacheit
def norm(self):
"""
Return the normalization of the specified functional form.
This function integrates over the coordinates of the Wavefunction, with
the bounds specified.
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
sqrt(2)*sqrt(L)/2
"""
exp = self.expr*conjugate(self.expr)
var = self.variables
limits = self.limits
for v in var:
curr_limits = limits[v]
exp = integrate(exp, (v, curr_limits[0], curr_limits[1]))
return sqrt(exp)
def normalize(self):
"""
Return a normalized version of the Wavefunction
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = symbols('x', real=True)
>>> L = symbols('L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.normalize()
Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L))
"""
const = self.norm
if const == oo:
raise NotImplementedError("The function is not normalizable!")
else:
return Wavefunction((const)**(-1)*self.expr, *self.args[1:])
def prob(self):
"""
Return the probability density of the w.f., `|\psi(x)|^2`
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', real=True)
>>> n = symbols('n', integer=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.prob()
Wavefunction(sin(pi*n*x/L)**2, x)
"""
return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
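# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal, hedged
# example of how ``norm``, ``normalize`` and ``prob`` fit together for an
# unnormalized particle-in-a-box state.  The __main__ guard keeps it out of
# normal imports.
if __name__ == '__main__':  # pragma: no cover
    from sympy import symbols, pi, sin, pprint

    _x, _L = symbols('x L', positive=True)
    _psi = Wavefunction(sin(pi*_x/_L), (_x, 0, _L))
    pprint(_psi.norm)               # sqrt(2)*sqrt(L)/2, so not yet normalized
    pprint(_psi.normalize().norm)   # 1
    pprint(_psi.prob().expr)        # sin(pi*x/L)**2, the probability density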
|
bsd-3-clause
|
quheng/scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
68
|
43439
|
import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
# partial_fit with class_weight='balanced' not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss=loss, alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([[-1, -1]])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([[3, 2]])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([[-1, -1]])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
# Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
# build a very, very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# refit with balanced class_weight; the improvement should be reproducible
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give small weights to the class 1 samples
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
# ground-truth linear model that generates y from X and to which the
# models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overflow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
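# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): a hedged,
# standalone demo of the fit vs. partial_fit equivalence exercised by
# _test_partial_fit_equal_fit above.  With a constant learning rate and
# shuffling disabled, two epochs of fit and two calls to partial_fit apply
# the same sequence of updates, so the decision functions agree closely.
# The leading underscore keeps it from being collected as a test.
def _demo_partial_fit_matches_fit():
    clf_full = SGDClassifier(alpha=0.01, eta0=0.01, learning_rate="constant",
                             n_iter=2, shuffle=False, random_state=42)
    clf_full.fit(X, Y)
    clf_inc = SGDClassifier(alpha=0.01, eta0=0.01, learning_rate="constant",
                            shuffle=False, random_state=42)
    for _ in range(2):
        clf_inc.partial_fit(X, Y, classes=np.unique(Y))
    assert_array_almost_equal(clf_full.decision_function(T),
                              clf_inc.decision_function(T), decimal=2)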
|
bsd-3-clause
|
miguelzuma/montepython_zuma
|
sphinx-documentation/conf.py
|
2
|
10271
|
# -*- coding: utf-8 -*-
#
# Monte Python documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 7 14:13:29 2013.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Mock the classy wrapper module
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['classy', 'numpy', 'scipy', 'matplotlib',
'matplotlib.pyplot', 'scipy.linalg', 'scipy.constants']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(-1, os.path.abspath('../montepython'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
'sphinx.ext.autosummary', 'sphinx.ext.mathjax', 'sphinx.ext.napoleon']
#,'numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Monte Python'
copyright = u'2013, Benjamin Audren'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
with open('../VERSION', 'r') as version_file:
# The full version, including alpha/beta/rc tags.
release = version_file.readline()
# The short X.Y version.
version = '.'.join(release.split('.')[:-1])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pyramid'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {
#"documentwidth": '50em',
#"sidebarwidth": '15em'}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MontePythondoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'MontePython.tex', u'Monte Python Documentation',
u'Benjamin Audren', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'montepython', u'Monte Python Documentation',
[u'Benjamin Audren'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'MontePython', u'Monte Python Documentation',
u'Benjamin Audren', 'MontePython', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Monte Python'
epub_author = u'Benjamin Audren'
epub_publisher = u'Benjamin Audren'
epub_copyright = u'2013, Benjamin Audren'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
autoclass_content = 'both'
# Napoleon settings
# go to http://sphinxcontrib-napoleon.readthedocs.org/en/latest/
# to see what all this is about
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = True
napoleon_use_param = False
napoleon_use_rtype = False
|
mit
|
Evfro/polara
|
polara/preprocessing/dataframes.py
|
1
|
6113
|
import heapq
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from pandas.api.types import is_numeric_dtype
from polara.lib.sampler import split_top_continuous
from polara.tools.random import check_random_state
def reindex(raw_data, index, filter_invalid=True, names=None):
'''
Factorizes column values based on provided pandas index. Allows resetting
index names. Optionally drops rows with entries not present in the index.
'''
if isinstance(index, pd.Index):
index = [index]
if isinstance(names, str):
names = [names]
if isinstance(names, (list, tuple, pd.Index)):
for i, name in enumerate(names):
index[i].name = name
new_data = raw_data.assign(**{
idx.name: idx.get_indexer(raw_data[idx.name]) for idx in index
})
if filter_invalid:
# pandas returns -1 if label is not present in the index
# checking if -1 is present anywhere in data
maybe_invalid = new_data.eval(
' or '.join([f'{idx.name} == -1' for idx in index])
)
if maybe_invalid.any():
print(f'Filtered {maybe_invalid.sum()} invalid observations.')
new_data = new_data.loc[~maybe_invalid]
return new_data
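# Hedged usage sketch (not part of polara): labels missing from the supplied
# index get code -1 and are dropped when filter_invalid is True.  The frame
# and index below are made-up illustration data.
def _reindex_demo():  # pragma: no cover
    users = pd.Index(['u1', 'u2'], name='userid')
    raw = pd.DataFrame({'userid': ['u1', 'u3', 'u2'], 'rating': [5, 3, 4]})
    # 'u3' is not in the index, so its row is reported and filtered out
    return reindex(raw, users)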
def matrix_from_observations(
data,
userid='userid',
itemid='itemid',
user_index=None,
item_index=None,
feedback=None,
preserve_order=False,
shape=None,
dtype=None
):
'''
Encodes pandas dataframe into sparse matrix. If index is not provided,
returns new index mapping, which optionally preserves order of original data.
Automatically removes inconsistent data not present in the provided index.
'''
if (user_index is None) or (item_index is None):
useridx, user_index = pd.factorize(data[userid], sort=preserve_order)
itemidx, item_index = pd.factorize(data[itemid], sort=preserve_order)
user_index.name = userid
item_index.name = itemid
else:
data = reindex(data, (user_index, item_index), filter_invalid=True)
useridx = data[userid].values
itemidx = data[itemid].values
if shape is None:
shape = (len(user_index), len(item_index))
if feedback is None:
values = np.ones_like(itemidx, dtype=dtype)
else:
values = data[feedback].values
matrix = csr_matrix((values, (useridx, itemidx)), dtype=dtype, shape=shape)
return matrix, user_index, item_index
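# Hedged usage sketch (not part of polara): encode a toy interaction log into
# a 2x2 CSR matrix; the column names follow the function defaults.
def _matrix_from_observations_demo():  # pragma: no cover
    log = pd.DataFrame({'userid': ['a', 'a', 'b'],
                        'itemid': ['x', 'y', 'x'],
                        'rating': [5, 3, 4]})
    matrix, user_index, item_index = matrix_from_observations(log, feedback='rating')
    # rows follow user_index ('a', 'b'), columns follow item_index ('x', 'y')
    return matrix.toarray(), user_index, item_index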
def split_holdout(
data,
userid = 'userid',
feedback = None,
sample_max_rated = False,
random_state = None
):
'''
Samples 1 item for every user according to the sample_max_rated rule.
It always shuffles the input data first, because even when sampling
top-rated elements there may be several items sharing the same top rating.
'''
idx_grouper = (
data
.sample(frac=1, random_state=random_state) # randomly permute data
.groupby(userid, as_index=False, sort=False)
)
if sample_max_rated: # take single item with the highest score
idx = idx_grouper[feedback].idxmax()
else: # data is already shuffled - simply take the 1st element
idx = idx_grouper.head(1).index # sample random element
observed = data.drop(idx.values)
holdout = data.loc[idx.values]
return observed, holdout
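# Hedged usage sketch (not part of polara): with the default settings exactly
# one randomly chosen interaction per user ends up in the holdout.
def _split_holdout_demo():  # pragma: no cover
    log = pd.DataFrame({'userid': [0, 0, 1, 1], 'itemid': [10, 11, 10, 12]})
    observed, holdout = split_holdout(log, random_state=0)
    # holdout has one row per user; observed keeps the remaining interactions
    return observed, holdout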
def sample_unseen_items(item_group, item_pool, n, random_state):
'Helper function to run on pandas dataframe grouper'
seen_items = item_group.values
candidates = np.setdiff1d(item_pool, seen_items, assume_unique=True)
return random_state.choice(candidates, n, replace=False)
def sample_unseen_interactions(
data,
item_pool,
n_random = 999,
random_state = None,
userid = 'userid',
itemid = 'itemid'
):
'''
Randomized sampling of unseen items for every user in data. Assumes data
was already preprocessed to a contiguous index.
'''
random_state = check_random_state(random_state)
return (
data
.groupby(userid, sort=False)[itemid]
.apply(sample_unseen_items, item_pool, n_random, random_state)
)
def verify_split(train, test, random_holdout, feedback, userid='userid'):
if random_holdout:
return
hold_gr = test.set_index(userid)[feedback]
useridx = hold_gr.index
train_gr = train.query(f'{userid} in @useridx').groupby(userid)[feedback]
assert train_gr.apply(lambda x: x.le(hold_gr.loc[x.name]).all()).all()
def to_numeric_array(series):
if not is_numeric_dtype(series):
if not hasattr(series, 'cat'):
series = series.astype('category')
return series.cat.codes.values
return series.values
def split_earliest_last(data, userid='userid', priority='timestamp', copy=False):
'''
It helps avoid "recommendations from the future", where the training set contains events that occur later than some events in the holdout and could therefore provide an oracle hint to the algorithm.
'''
topseq_idx, lowseq_idx, nonseq_idx = split_top_continuous(
to_numeric_array(data[userid]), data[priority].values
)
observed = data.iloc[lowseq_idx]
holdout = data.iloc[topseq_idx]
future = data.iloc[nonseq_idx]
if copy:
observed = observed.copy()
holdout = holdout.copy()
future = future.copy()
return observed, holdout, future
def filter_sessions_by_length(data, session_label='userid', min_session_length=3):
"""Filters users with insufficient number of items"""
if data.duplicated().any():
raise NotImplementedError
sz = data[session_label].value_counts(sort=False)
has_valid_session_length = sz >= min_session_length
if not has_valid_session_length.all():
valid_sessions = sz.index[has_valid_session_length]
new_data = data[data[session_label].isin(valid_sessions)].copy()
print('Sessions are filtered by length')
else:
new_data = data
return new_data
|
mit
|
paulsbrookes/cqed_sims_qutip
|
spectroscopy/steady_state_photon_hist.py
|
1
|
1932
|
import numpy as np
import yaml
from qutip import *
from pylab import *
from scipy.fftpack import fft
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.optimize import fsolve
from qutip.ui.progressbar import TextProgressBar
class Parameters:
def __init__(self, wc, wq, eps, g, chi, kappa, gamma, t_levels, c_levels):
self.wc = wc
self.wq = wq
self.eps = eps
self.g = g
self.chi = chi
self.gamma = gamma
self.kappa = kappa
self.t_levels = t_levels
self.c_levels = c_levels
def copy(self):
params = Parameters(self.wc, self.wq, self.eps, self.g, self.chi, self.kappa, self.gamma, self.t_levels, self.c_levels)
return params
def hamiltonian(params, wd):
a = tensor(destroy(params.c_levels), qeye(params.t_levels))
sm = tensor(qeye(params.c_levels), destroy(params.t_levels))
H = (params.wc - wd) * a.dag() * a + (params.wq - wd) * sm.dag() * sm \
+ params.chi * sm.dag() * sm * (sm.dag() * sm - 1) + params.g * (a.dag() * sm + a * sm.dag()) \
+ params.eps * (a + a.dag())
return H
def rho_ss_calc(args):
wd = args[0]
params = args[1]
a = tensor(destroy(params.c_levels), qeye(params.t_levels))
sm = tensor(qeye(params.c_levels), destroy(params.t_levels))
c_ops = []
c_ops.append(np.sqrt(params.kappa) * a)
c_ops.append(np.sqrt(params.gamma) * sm)
H = hamiltonian(params, wd)
rho_ss = steadystate(H, c_ops)
return rho_ss
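def mean_cavity_photons(rho_ss, params):
    # Hedged helper sketch (not part of the original script): the expected
    # cavity photon number <a^dag a> in a given steady state, built with the
    # same tensor-product operator ordering as hamiltonian() above.
    a = tensor(destroy(params.c_levels), qeye(params.t_levels))
    return expect(a.dag() * a, rho_ss)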
if __name__ == '__main__':
#wc, wq, eps, g, chi, kappa, gamma, t_levels, c_levels
t_levels = 5
c_levels = 20
params = Parameters(10.4267, 9.39128, 0.004, 0.3096, -0.097, 0.00146, 0.000833, t_levels, c_levels)
wd = 10.50662
rho_ss = rho_ss_calc([wd, params])
rho_c_ss = rho_ss.ptrace(0)
index = np.arange(c_levels)
plt.bar(index, rho_c_ss.diag())
plt.show()
|
apache-2.0
|
wronk/mne-python
|
mne/io/fiff/tests/test_raw_fiff.py
|
1
|
47636
|
# Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os
import os.path as op
import glob
from copy import deepcopy
import warnings
import itertools as itt
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
from nose.tools import assert_true, assert_raises, assert_not_equal
from mne.datasets import testing
from mne.io.constants import FIFF
from mne.io import Raw, RawArray, concatenate_raws, read_raw_fif
from mne.io.tests.test_raw import _test_concat, _test_raw_reader
from mne import (concatenate_events, find_events, equalize_channels,
compute_proj_raw, pick_types, pick_channels, create_info)
from mne.utils import (_TempDir, requires_pandas, slow_test,
requires_mne, run_subprocess, run_tests_if_main)
from mne.externals.six.moves import zip, cPickle as pickle
from mne.io.proc_history import _get_sss_rank
from mne.io.pick import _picks_by_type
from mne.annotations import Annotations
from mne.tests.common import assert_naming
warnings.simplefilter('always') # enable b/c these tests throw warnings
testing_path = testing.data_path(download=False)
data_dir = op.join(testing_path, 'MEG', 'sample')
fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
ms_fname = op.join(testing_path, 'SSS', 'test_move_anon_raw.fif')
base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
test_fif_fname = op.join(base_dir, 'test_raw.fif')
test_fif_gz_fname = op.join(base_dir, 'test_raw.fif.gz')
ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
fif_bad_marked_fname = op.join(base_dir, 'test_withbads_raw.fif')
bad_file_works = op.join(base_dir, 'test_bads.txt')
bad_file_wrong = op.join(base_dir, 'test_wrong_bads.txt')
hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
rng = np.random.RandomState(0)
def test_fix_types():
"""Test fixing of channel types
"""
for fname, change in ((hp_fif_fname, True), (test_fif_fname, False),
(ctf_fname, False)):
raw = Raw(fname)
mag_picks = pick_types(raw.info, meg='mag')
other_picks = np.setdiff1d(np.arange(len(raw.ch_names)), mag_picks)
# we don't actually have any files suffering from this problem, so
# fake it
if change:
for ii in mag_picks:
raw.info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T2
orig_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
raw.fix_mag_coil_types()
new_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
if not change:
assert_array_equal(orig_types, new_types)
else:
assert_array_equal(orig_types[other_picks], new_types[other_picks])
assert_true((orig_types[mag_picks] != new_types[mag_picks]).all())
assert_true((new_types[mag_picks] ==
FIFF.FIFFV_COIL_VV_MAG_T3).all())
def test_concat():
"""Test RawFIF concatenation
"""
# we trim the file to save lots of memory and some time
tempdir = _TempDir()
raw = read_raw_fif(test_fif_fname)
raw.crop(0, 2., copy=False)
test_name = op.join(tempdir, 'test_raw.fif')
raw.save(test_name)
# now run the standard test
_test_concat(read_raw_fif, test_name)
@testing.requires_testing_data
def test_hash_raw():
"""Test hashing raw objects
"""
raw = read_raw_fif(fif_fname)
assert_raises(RuntimeError, raw.__hash__)
raw = Raw(fif_fname).crop(0, 0.5, copy=False)
raw.load_data()
raw_2 = Raw(fif_fname).crop(0, 0.5, copy=False)
raw_2.load_data()
assert_equal(hash(raw), hash(raw_2))
# do NOT use assert_equal here, failing output is terrible
    assert_true(pickle.dumps(raw) == pickle.dumps(raw_2))
raw_2._data[0, 0] -= 1
assert_not_equal(hash(raw), hash(raw_2))
@testing.requires_testing_data
def test_maxshield():
"""Test maxshield warning
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
Raw(ms_fname, allow_maxshield=True)
assert_equal(len(w), 1)
assert_true('test_raw_fiff.py' in w[0].filename)
@testing.requires_testing_data
def test_subject_info():
"""Test reading subject information
"""
tempdir = _TempDir()
raw = Raw(fif_fname).crop(0, 1, copy=False)
assert_true(raw.info['subject_info'] is None)
# fake some subject data
keys = ['id', 'his_id', 'last_name', 'first_name', 'birthday', 'sex',
'hand']
vals = [1, 'foobar', 'bar', 'foo', (1901, 2, 3), 0, 1]
subject_info = dict()
for key, val in zip(keys, vals):
subject_info[key] = val
raw.info['subject_info'] = subject_info
out_fname = op.join(tempdir, 'test_subj_info_raw.fif')
raw.save(out_fname, overwrite=True)
raw_read = Raw(out_fname)
for key in keys:
assert_equal(subject_info[key], raw_read.info['subject_info'][key])
assert_equal(raw.info['meas_date'], raw_read.info['meas_date'])
raw.anonymize()
raw.save(out_fname, overwrite=True)
raw_read = Raw(out_fname)
for this_raw in (raw, raw_read):
assert_true(this_raw.info.get('subject_info') is None)
assert_equal(this_raw.info['meas_date'], [0, 0])
assert_equal(raw.info['file_id']['secs'], 0)
assert_equal(raw.info['meas_id']['secs'], 0)
# When we write out with raw.save, these get overwritten with the
# new save time
assert_true(raw_read.info['file_id']['secs'] > 0)
assert_true(raw_read.info['meas_id']['secs'] > 0)
@testing.requires_testing_data
def test_copy_append():
"""Test raw copying and appending combinations
"""
raw = Raw(fif_fname, preload=True).copy()
raw_full = Raw(fif_fname)
raw_full.append(raw)
data = raw_full[:, :][0]
assert_equal(data.shape[1], 2 * raw._data.shape[1])
@slow_test
@testing.requires_testing_data
def test_rank_estimation():
"""Test raw rank estimation
"""
iter_tests = itt.product(
[fif_fname, hp_fif_fname], # sss
['norm', dict(mag=1e11, grad=1e9, eeg=1e5)]
)
for fname, scalings in iter_tests:
raw = Raw(fname)
(_, picks_meg), (_, picks_eeg) = _picks_by_type(raw.info,
meg_combined=True)
n_meg = len(picks_meg)
n_eeg = len(picks_eeg)
raw = Raw(fname, preload=True)
if 'proc_history' not in raw.info:
expected_rank = n_meg + n_eeg
else:
mf = raw.info['proc_history'][0]['max_info']
expected_rank = _get_sss_rank(mf) + n_eeg
assert_array_equal(raw.estimate_rank(scalings=scalings), expected_rank)
assert_array_equal(raw.estimate_rank(picks=picks_eeg,
scalings=scalings),
n_eeg)
raw = Raw(fname, preload=False)
if 'sss' in fname:
tstart, tstop = 0., 30.
raw.add_proj(compute_proj_raw(raw))
raw.apply_proj()
else:
tstart, tstop = 10., 20.
raw.apply_proj()
n_proj = len(raw.info['projs'])
assert_array_equal(raw.estimate_rank(tstart=tstart, tstop=tstop,
scalings=scalings),
expected_rank - (1 if 'sss' in fname else n_proj))
@testing.requires_testing_data
def test_output_formats():
"""Test saving and loading raw data using multiple formats
"""
tempdir = _TempDir()
formats = ['short', 'int', 'single', 'double']
tols = [1e-4, 1e-7, 1e-7, 1e-15]
# let's fake a raw file with different formats
raw = Raw(test_fif_fname).crop(0, 1, copy=False)
temp_file = op.join(tempdir, 'raw.fif')
for ii, (fmt, tol) in enumerate(zip(formats, tols)):
# Let's test the overwriting error throwing while we're at it
if ii > 0:
assert_raises(IOError, raw.save, temp_file, fmt=fmt)
raw.save(temp_file, fmt=fmt, overwrite=True)
raw2 = Raw(temp_file)
raw2_data = raw2[:, :][0]
assert_allclose(raw2_data, raw[:, :][0], rtol=tol, atol=1e-25)
assert_equal(raw2.orig_format, fmt)
def _compare_combo(raw, new, times, n_times):
for ti in times: # let's do a subset of points for speed
orig = raw[:, ti % n_times][0]
        # compare with a tolerance because of possible dtype differences
assert_allclose(orig, new[:, ti][0])
@slow_test
@testing.requires_testing_data
def test_multiple_files():
"""Test loading multiple files simultaneously
"""
# split file
tempdir = _TempDir()
raw = Raw(fif_fname).crop(0, 10)
raw.load_data()
raw.load_data() # test no operation
split_size = 3. # in seconds
sfreq = raw.info['sfreq']
nsamp = (raw.last_samp - raw.first_samp)
tmins = np.round(np.arange(0., nsamp, split_size * sfreq))
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp]))
tmaxs /= sfreq
tmins /= sfreq
assert_equal(raw.n_times, len(raw.times))
# going in reverse order so the last fname is the first file (need later)
raws = [None] * len(tmins)
for ri in range(len(tmins) - 1, -1, -1):
fname = op.join(tempdir, 'test_raw_split-%d_raw.fif' % ri)
raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri])
raws[ri] = Raw(fname)
events = [find_events(r, stim_channel='STI 014') for r in raws]
last_samps = [r.last_samp for r in raws]
first_samps = [r.first_samp for r in raws]
# test concatenation of split file
assert_raises(ValueError, concatenate_raws, raws, True, events[1:])
all_raw_1, events1 = concatenate_raws(raws, preload=False,
events_list=events)
assert_equal(raw.first_samp, all_raw_1.first_samp)
assert_equal(raw.last_samp, all_raw_1.last_samp)
assert_allclose(raw[:, :][0], all_raw_1[:, :][0])
raws[0] = Raw(fname)
all_raw_2 = concatenate_raws(raws, preload=True)
assert_allclose(raw[:, :][0], all_raw_2[:, :][0])
# test proper event treatment for split files
events2 = concatenate_events(events, first_samps, last_samps)
events3 = find_events(all_raw_2, stim_channel='STI 014')
assert_array_equal(events1, events2)
assert_array_equal(events1, events3)
# test various methods of combining files
raw = Raw(fif_fname, preload=True)
n_times = raw.n_times
# make sure that all our data match
times = list(range(0, 2 * n_times, 999))
# add potentially problematic points
times.extend([n_times - 1, n_times, 2 * n_times - 1])
raw_combo0 = Raw([fif_fname, fif_fname], preload=True)
_compare_combo(raw, raw_combo0, times, n_times)
raw_combo = Raw([fif_fname, fif_fname], preload=False)
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = Raw([fif_fname, fif_fname], preload='memmap8.dat')
_compare_combo(raw, raw_combo, times, n_times)
assert_raises(ValueError, Raw, [fif_fname, ctf_fname])
assert_raises(ValueError, Raw, [fif_fname, fif_bad_marked_fname])
assert_equal(raw[:, :][0].shape[1] * 2, raw_combo0[:, :][0].shape[1])
assert_equal(raw_combo0[:, :][0].shape[1], raw_combo0.n_times)
# with all data preloaded, result should be preloaded
raw_combo = Raw(fif_fname, preload=True)
raw_combo.append(Raw(fif_fname, preload=True))
assert_true(raw_combo.preload is True)
assert_equal(raw_combo.n_times, raw_combo._data.shape[1])
_compare_combo(raw, raw_combo, times, n_times)
# with any data not preloaded, don't set result as preloaded
raw_combo = concatenate_raws([Raw(fif_fname, preload=True),
Raw(fif_fname, preload=False)])
assert_true(raw_combo.preload is False)
assert_array_equal(find_events(raw_combo, stim_channel='STI 014'),
find_events(raw_combo0, stim_channel='STI 014'))
_compare_combo(raw, raw_combo, times, n_times)
# user should be able to force data to be preloaded upon concat
raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
Raw(fif_fname, preload=True)],
preload=True)
assert_true(raw_combo.preload is True)
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
Raw(fif_fname, preload=True)],
preload='memmap3.dat')
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([Raw(fif_fname, preload=True),
Raw(fif_fname, preload=True)],
preload='memmap4.dat')
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
Raw(fif_fname, preload=False)],
preload='memmap5.dat')
_compare_combo(raw, raw_combo, times, n_times)
# verify that combining raws with different projectors throws an exception
raw.add_proj([], remove_existing=True)
assert_raises(ValueError, raw.append, Raw(fif_fname, preload=True))
# now test event treatment for concatenated raw files
events = [find_events(raw, stim_channel='STI 014'),
find_events(raw, stim_channel='STI 014')]
last_samps = [raw.last_samp, raw.last_samp]
first_samps = [raw.first_samp, raw.first_samp]
events = concatenate_events(events, first_samps, last_samps)
events2 = find_events(raw_combo0, stim_channel='STI 014')
assert_array_equal(events, events2)
# check out the len method
assert_equal(len(raw), raw.n_times)
assert_equal(len(raw), raw.last_samp - raw.first_samp + 1)
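# Sketch of the split-file event bookkeeping exercised above (illustrative
# only; the file names are hypothetical):
#
#     raws = [read_raw_fif(f) for f in ('part-0_raw.fif', 'part-1_raw.fif')]
#     events = [find_events(r, stim_channel='STI 014') for r in raws]
#     first_samps = [r.first_samp for r in raws]
#     last_samps = [r.last_samp for r in raws]
#     raw_all, events_all = concatenate_raws(raws, events_list=events)
#     # equivalent event bookkeeping done by hand:
#     events_all2 = concatenate_events(events, first_samps, last_samps)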
@testing.requires_testing_data
def test_split_files():
"""Test writing and reading of split raw files
"""
tempdir = _TempDir()
raw_1 = Raw(fif_fname, preload=True)
    assert_allclose(raw_1.info['buffer_size_sec'], 10., atol=1e-2)  # buffer size
split_fname = op.join(tempdir, 'split_raw.fif')
raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB')
raw_2 = Raw(split_fname)
    assert_allclose(raw_2.info['buffer_size_sec'], 1., atol=1e-2)  # buffer size
data_1, times_1 = raw_1[:, :]
data_2, times_2 = raw_2[:, :]
assert_array_equal(data_1, data_2)
assert_array_equal(times_1, times_2)
# test the case where the silly user specifies the split files
fnames = [split_fname]
fnames.extend(sorted(glob.glob(op.join(tempdir, 'split_raw-*.fif'))))
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
raw_2 = Raw(fnames)
data_2, times_2 = raw_2[:, :]
assert_array_equal(data_1, data_2)
assert_array_equal(times_1, times_2)
def test_load_bad_channels():
"""Test reading/writing of bad channels
"""
tempdir = _TempDir()
# Load correctly marked file (manually done in mne_process_raw)
raw_marked = Raw(fif_bad_marked_fname)
correct_bads = raw_marked.info['bads']
raw = Raw(test_fif_fname)
# Make sure it starts clean
assert_array_equal(raw.info['bads'], [])
# Test normal case
raw.load_bad_channels(bad_file_works)
# Write it out, read it in, and check
raw.save(op.join(tempdir, 'foo_raw.fif'))
raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
assert_equal(correct_bads, raw_new.info['bads'])
# Reset it
raw.info['bads'] = []
# Test bad case
assert_raises(ValueError, raw.load_bad_channels, bad_file_wrong)
# Test forcing the bad case
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw.load_bad_channels(bad_file_wrong, force=True)
n_found = sum(['1 bad channel' in str(ww.message) for ww in w])
assert_equal(n_found, 1) # there could be other irrelevant errors
# write it out, read it in, and check
raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
assert_equal(correct_bads, raw_new.info['bads'])
# Check that bad channels are cleared
raw.load_bad_channels(None)
raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
assert_equal([], raw_new.info['bads'])
@slow_test
@testing.requires_testing_data
def test_io_raw():
"""Test IO for raw data (Neuromag + CTF + gz)
"""
tempdir = _TempDir()
# test unicode io
for chars in [b'\xc3\xa4\xc3\xb6\xc3\xa9', b'a']:
with Raw(fif_fname) as r:
assert_true('Raw' in repr(r))
assert_true(op.basename(fif_fname) in repr(r))
desc1 = r.info['description'] = chars.decode('utf-8')
temp_file = op.join(tempdir, 'raw.fif')
r.save(temp_file, overwrite=True)
with Raw(temp_file) as r2:
desc2 = r2.info['description']
assert_equal(desc1, desc2)
# Let's construct a simple test for IO first
raw = Raw(fif_fname).crop(0, 3.5)
raw.load_data()
# put in some data that we know the values of
data = rng.randn(raw._data.shape[0], raw._data.shape[1])
raw._data[:, :] = data
# save it somewhere
fname = op.join(tempdir, 'test_copy_raw.fif')
raw.save(fname, buffer_size_sec=1.0)
# read it in, make sure the whole thing matches
raw = Raw(fname)
assert_allclose(data, raw[:, :][0], rtol=1e-6, atol=1e-20)
# let's read portions across the 1-sec tag boundary, too
inds = raw.time_as_index([1.75, 2.25])
sl = slice(inds[0], inds[1])
assert_allclose(data[:, sl], raw[:, sl][0], rtol=1e-6, atol=1e-20)
# now let's do some real I/O
fnames_in = [fif_fname, test_fif_gz_fname, ctf_fname]
fnames_out = ['raw.fif', 'raw.fif.gz', 'raw.fif']
for fname_in, fname_out in zip(fnames_in, fnames_out):
fname_out = op.join(tempdir, fname_out)
raw = Raw(fname_in)
nchan = raw.info['nchan']
ch_names = raw.info['ch_names']
meg_channels_idx = [k for k in range(nchan)
if ch_names[k][0] == 'M']
n_channels = 100
meg_channels_idx = meg_channels_idx[:n_channels]
start, stop = raw.time_as_index([0, 5])
data, times = raw[meg_channels_idx, start:(stop + 1)]
meg_ch_names = [ch_names[k] for k in meg_channels_idx]
# Set up pick list: MEG + STI 014 - bad channels
include = ['STI 014']
include += meg_ch_names
picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
misc=True, ref_meg=True, include=include,
exclude='bads')
# Writing with drop_small_buffer True
raw.save(fname_out, picks, tmin=0, tmax=4, buffer_size_sec=3,
drop_small_buffer=True, overwrite=True)
raw2 = Raw(fname_out)
sel = pick_channels(raw2.ch_names, meg_ch_names)
data2, times2 = raw2[sel, :]
assert_true(times2.max() <= 3)
# Writing
raw.save(fname_out, picks, tmin=0, tmax=5, overwrite=True)
if fname_in == fif_fname or fname_in == fif_fname + '.gz':
assert_equal(len(raw.info['dig']), 146)
raw2 = Raw(fname_out)
sel = pick_channels(raw2.ch_names, meg_ch_names)
data2, times2 = raw2[sel, :]
assert_allclose(data, data2, rtol=1e-6, atol=1e-20)
assert_allclose(times, times2)
assert_allclose(raw.info['sfreq'], raw2.info['sfreq'], rtol=1e-5)
# check transformations
for trans in ['dev_head_t', 'dev_ctf_t', 'ctf_head_t']:
if raw.info[trans] is None:
assert_true(raw2.info[trans] is None)
else:
assert_array_equal(raw.info[trans]['trans'],
raw2.info[trans]['trans'])
# check transformation 'from' and 'to'
if trans.startswith('dev'):
from_id = FIFF.FIFFV_COORD_DEVICE
else:
from_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
if trans[4:8] == 'head':
to_id = FIFF.FIFFV_COORD_HEAD
else:
to_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
for raw_ in [raw, raw2]:
assert_equal(raw_.info[trans]['from'], from_id)
assert_equal(raw_.info[trans]['to'], to_id)
if fname_in == fif_fname or fname_in == fif_fname + '.gz':
assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r'])
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
raw_badname = op.join(tempdir, 'test-bad-name.fif.gz')
raw.save(raw_badname)
Raw(raw_badname)
assert_naming(w, 'test_raw_fiff.py', 2)
@testing.requires_testing_data
def test_io_complex():
"""Test IO with complex data types
"""
rng = np.random.RandomState(0)
tempdir = _TempDir()
dtypes = [np.complex64, np.complex128]
raw = _test_raw_reader(Raw, fnames=fif_fname)
picks = np.arange(5)
start, stop = raw.time_as_index([0, 5])
data_orig, _ = raw[picks, start:stop]
for di, dtype in enumerate(dtypes):
imag_rand = np.array(1j * rng.randn(data_orig.shape[0],
data_orig.shape[1]), dtype)
raw_cp = raw.copy()
raw_cp._data = np.array(raw_cp._data, dtype)
raw_cp._data[picks, start:stop] += imag_rand
        # this should raise a warning because the data are complex
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw_cp.save(op.join(tempdir, 'raw.fif'), picks, tmin=0, tmax=5,
overwrite=True)
        # warning gets thrown on every instance b/c simplefilter('always')
assert_equal(len(w), 1)
raw2 = Raw(op.join(tempdir, 'raw.fif'))
raw2_data, _ = raw2[picks, :]
n_samp = raw2_data.shape[1]
assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
# with preloading
raw2 = Raw(op.join(tempdir, 'raw.fif'), preload=True)
raw2_data, _ = raw2[picks, :]
n_samp = raw2_data.shape[1]
assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
@testing.requires_testing_data
def test_getitem():
"""Test getitem/indexing of Raw
"""
for preload in [False, True, 'memmap.dat']:
raw = Raw(fif_fname, preload=preload)
data, times = raw[0, :]
data1, times1 = raw[0]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
data, times = raw[0:2, :]
data1, times1 = raw[0:2]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
data1, times1 = raw[[0, 1]]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
assert_array_equal(raw[-10:, :][0],
raw[len(raw.ch_names) - 10:, :][0])
assert_raises(ValueError, raw.__getitem__,
(slice(-len(raw.ch_names) - 1), slice(None)))
@testing.requires_testing_data
def test_proj():
"""Test SSP proj operations
"""
tempdir = _TempDir()
for proj in [True, False]:
raw = Raw(fif_fname, preload=False, proj=proj)
assert_true(all(p['active'] == proj for p in raw.info['projs']))
data, times = raw[0:2, :]
data1, times1 = raw[0:2]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
# test adding / deleting proj
if proj:
assert_raises(ValueError, raw.add_proj, [],
{'remove_existing': True})
assert_raises(ValueError, raw.del_proj, 0)
else:
projs = deepcopy(raw.info['projs'])
n_proj = len(raw.info['projs'])
raw.del_proj(0)
assert_equal(len(raw.info['projs']), n_proj - 1)
raw.add_proj(projs, remove_existing=False)
# Test that already existing projections are not added.
assert_equal(len(raw.info['projs']), n_proj)
raw.add_proj(projs[:-1], remove_existing=True)
assert_equal(len(raw.info['projs']), n_proj - 1)
# test apply_proj() with and without preload
for preload in [True, False]:
raw = Raw(fif_fname, preload=preload, proj=False)
data, times = raw[:, 0:2]
raw.apply_proj()
data_proj_1 = np.dot(raw._projector, data)
# load the file again without proj
raw = Raw(fif_fname, preload=preload, proj=False)
# write the file with proj. activated, make sure proj has been applied
raw.save(op.join(tempdir, 'raw.fif'), proj=True, overwrite=True)
raw2 = Raw(op.join(tempdir, 'raw.fif'), proj=False)
data_proj_2, _ = raw2[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert_true(all(p['active'] for p in raw2.info['projs']))
# read orig file with proj. active
raw2 = Raw(fif_fname, preload=preload, proj=True)
data_proj_2, _ = raw2[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert_true(all(p['active'] for p in raw2.info['projs']))
# test that apply_proj works
raw.apply_proj()
data_proj_2, _ = raw[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert_allclose(data_proj_2, np.dot(raw._projector, data_proj_2))
tempdir = _TempDir()
out_fname = op.join(tempdir, 'test_raw.fif')
raw = read_raw_fif(test_fif_fname, preload=True).crop(0, 0.002, copy=False)
raw.pick_types(meg=False, eeg=True)
raw.info['projs'] = [raw.info['projs'][-1]]
raw._data.fill(0)
raw._data[-1] = 1.
raw.save(out_fname)
raw = read_raw_fif(out_fname, proj=True, preload=False)
assert_allclose(raw[:, :][0][:1], raw[0, :][0])
@testing.requires_testing_data
def test_preload_modify():
"""Test preloading and modifying data
"""
tempdir = _TempDir()
for preload in [False, True, 'memmap.dat']:
raw = Raw(fif_fname, preload=preload)
nsamp = raw.last_samp - raw.first_samp + 1
picks = pick_types(raw.info, meg='grad', exclude='bads')
data = rng.randn(len(picks), nsamp // 2)
try:
raw[picks, :nsamp // 2] = data
except RuntimeError as err:
if not preload:
continue
else:
raise err
tmp_fname = op.join(tempdir, 'raw.fif')
raw.save(tmp_fname, overwrite=True)
raw_new = Raw(tmp_fname)
        data_new, _ = raw_new[picks, :nsamp // 2]
assert_allclose(data, data_new)
@slow_test
@testing.requires_testing_data
def test_filter():
"""Test filtering (FIR and IIR) and Raw.apply_function interface
"""
raw = Raw(fif_fname).crop(0, 7)
raw.load_data()
sig_dec = 11
sig_dec_notch = 12
sig_dec_notch_fit = 12
picks_meg = pick_types(raw.info, meg=True, exclude='bads')
picks = picks_meg[:4]
filter_params = dict(picks=picks, n_jobs=2)
raw_lp = raw.copy().filter(0., 4.0 - 0.25, **filter_params)
raw_hp = raw.copy().filter(8.0 + 0.25, None, **filter_params)
raw_bp = raw.copy().filter(4.0 + 0.25, 8.0 - 0.25, **filter_params)
raw_bs = raw.copy().filter(8.0 + 0.25, 4.0 - 0.25, **filter_params)
data, _ = raw[picks, :]
lp_data, _ = raw_lp[picks, :]
hp_data, _ = raw_hp[picks, :]
bp_data, _ = raw_bp[picks, :]
bs_data, _ = raw_bs[picks, :]
assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
filter_params_iir = dict(picks=picks, n_jobs=2, method='iir')
raw_lp_iir = raw.copy().filter(0., 4.0, **filter_params_iir)
raw_hp_iir = raw.copy().filter(8.0, None, **filter_params_iir)
raw_bp_iir = raw.copy().filter(4.0, 8.0, **filter_params_iir)
del filter_params_iir
lp_data_iir, _ = raw_lp_iir[picks, :]
hp_data_iir, _ = raw_hp_iir[picks, :]
bp_data_iir, _ = raw_bp_iir[picks, :]
summation = lp_data_iir + hp_data_iir + bp_data_iir
assert_array_almost_equal(data[:, 100:-100], summation[:, 100:-100],
sig_dec)
# make sure we didn't touch other channels
data, _ = raw[picks_meg[4:], :]
bp_data, _ = raw_bp[picks_meg[4:], :]
assert_array_equal(data, bp_data)
bp_data_iir, _ = raw_bp_iir[picks_meg[4:], :]
assert_array_equal(data, bp_data_iir)
# ... and that inplace changes are inplace
raw_copy = raw.copy()
raw_copy.filter(None, 20., picks=picks, n_jobs=2)
assert_true(raw._data[0, 0] != raw_copy._data[0, 0])
assert_equal(raw.copy().filter(None, 20., **filter_params)._data,
raw_copy._data)
# do a very simple check on line filtering
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
raw_bs = raw.copy().filter(60.0 + 0.5, 60.0 - 0.5, **filter_params)
data_bs, _ = raw_bs[picks, :]
raw_notch = raw.copy().notch_filter(
60.0, picks=picks, n_jobs=2, method='fft')
data_notch, _ = raw_notch[picks, :]
assert_array_almost_equal(data_bs, data_notch, sig_dec_notch)
# now use the sinusoidal fitting
raw_notch = raw.copy().notch_filter(
None, picks=picks, n_jobs=2, method='spectrum_fit')
data_notch, _ = raw_notch[picks, :]
data, _ = raw[picks, :]
assert_array_almost_equal(data, data_notch, sig_dec_notch_fit)
def test_filter_picks():
"""Test filtering default channel picks"""
ch_types = ['mag', 'grad', 'eeg', 'seeg', 'misc', 'stim', 'ecog']
info = create_info(ch_names=ch_types, ch_types=ch_types, sfreq=256)
raw = RawArray(data=np.zeros((len(ch_types), 1000)), info=info)
# -- Deal with meg mag grad exception
ch_types = ('misc', 'stim', 'meg', 'eeg', 'seeg', 'ecog')
# -- Filter data channels
for ch_type in ('mag', 'grad', 'eeg', 'seeg', 'ecog'):
picks = dict((ch, ch == ch_type) for ch in ch_types)
picks['meg'] = ch_type if ch_type in ('mag', 'grad') else False
raw_ = raw.copy().pick_types(**picks)
# Avoid RuntimeWarning due to Attenuation
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw_.filter(10, 30)
assert_true(any(['Attenuation' in str(ww.message) for ww in w]))
# -- Error if no data channel
for ch_type in ('misc', 'stim'):
picks = dict((ch, ch == ch_type) for ch in ch_types)
raw_ = raw.copy().pick_types(**picks)
assert_raises(RuntimeError, raw_.filter, 10, 30)
@testing.requires_testing_data
def test_crop():
"""Test cropping raw files
"""
# split a concatenated file to test a difficult case
raw = Raw([fif_fname, fif_fname], preload=False)
split_size = 10. # in seconds
sfreq = raw.info['sfreq']
nsamp = (raw.last_samp - raw.first_samp + 1)
# do an annoying case (off-by-one splitting)
tmins = np.r_[1., np.round(np.arange(0., nsamp - 1, split_size * sfreq))]
tmins = np.sort(tmins)
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
tmaxs /= sfreq
tmins /= sfreq
raws = [None] * len(tmins)
for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
raws[ri] = raw.copy().crop(tmin, tmax, copy=False)
all_raw_2 = concatenate_raws(raws, preload=False)
assert_equal(raw.first_samp, all_raw_2.first_samp)
assert_equal(raw.last_samp, all_raw_2.last_samp)
assert_array_equal(raw[:, :][0], all_raw_2[:, :][0])
tmins = np.round(np.arange(0., nsamp - 1, split_size * sfreq))
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
tmaxs /= sfreq
tmins /= sfreq
    # going in reverse order so the last fname is the first file (need it later)
raws = [None] * len(tmins)
for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
raws[ri] = raw.copy().crop(tmin, tmax, copy=False)
# test concatenation of split file
all_raw_1 = concatenate_raws(raws, preload=False)
all_raw_2 = raw.copy().crop(0, None, copy=False)
for ar in [all_raw_1, all_raw_2]:
assert_equal(raw.first_samp, ar.first_samp)
assert_equal(raw.last_samp, ar.last_samp)
assert_array_equal(raw[:, :][0], ar[:, :][0])
# test shape consistency of cropped raw
data = np.zeros((1, 1002001))
info = create_info(1, 1000)
raw = RawArray(data, info)
for tmin in range(0, 1001, 100):
raw1 = raw.copy().crop(tmin=tmin, tmax=tmin + 2, copy=False)
assert_equal(raw1[:][0].shape, (1, 2001))
@testing.requires_testing_data
def test_resample():
"""Test resample (with I/O and multiple files)
"""
tempdir = _TempDir()
raw = Raw(fif_fname).crop(0, 3, copy=False)
raw.load_data()
raw_resamp = raw.copy()
sfreq = raw.info['sfreq']
# test parallel on upsample
raw_resamp.resample(sfreq * 2, n_jobs=2, npad='auto')
assert_equal(raw_resamp.n_times, len(raw_resamp.times))
raw_resamp.save(op.join(tempdir, 'raw_resamp-raw.fif'))
raw_resamp = Raw(op.join(tempdir, 'raw_resamp-raw.fif'), preload=True)
assert_equal(sfreq, raw_resamp.info['sfreq'] / 2)
assert_equal(raw.n_times, raw_resamp.n_times / 2)
assert_equal(raw_resamp._data.shape[1], raw_resamp.n_times)
assert_equal(raw._data.shape[0], raw_resamp._data.shape[0])
# test non-parallel on downsample
raw_resamp.resample(sfreq, n_jobs=1, npad='auto')
assert_equal(raw_resamp.info['sfreq'], sfreq)
assert_equal(raw._data.shape, raw_resamp._data.shape)
assert_equal(raw.first_samp, raw_resamp.first_samp)
    assert_equal(raw.last_samp, raw_resamp.last_samp)
# upsampling then downsampling doubles resampling error, but this still
# works (hooray). Note that the stim channels had to be sub-sampled
# without filtering to be accurately preserved
# note we have to treat MEG and EEG+STIM channels differently (tols)
assert_allclose(raw._data[:306, 200:-200],
raw_resamp._data[:306, 200:-200],
rtol=1e-2, atol=1e-12)
assert_allclose(raw._data[306:, 200:-200],
raw_resamp._data[306:, 200:-200],
rtol=1e-2, atol=1e-7)
# now check multiple file support w/resampling, as order of operations
# (concat, resample) should not affect our data
raw1 = raw.copy()
raw2 = raw.copy()
raw3 = raw.copy()
raw4 = raw.copy()
raw1 = concatenate_raws([raw1, raw2])
raw1.resample(10., npad='auto')
raw3.resample(10., npad='auto')
raw4.resample(10., npad='auto')
raw3 = concatenate_raws([raw3, raw4])
assert_array_equal(raw1._data, raw3._data)
assert_array_equal(raw1._first_samps, raw3._first_samps)
assert_array_equal(raw1._last_samps, raw3._last_samps)
assert_array_equal(raw1._raw_lengths, raw3._raw_lengths)
assert_equal(raw1.first_samp, raw3.first_samp)
assert_equal(raw1.last_samp, raw3.last_samp)
assert_equal(raw1.info['sfreq'], raw3.info['sfreq'])
# test resampling of stim channel
# basic decimation
stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
assert_allclose(raw.resample(8., npad='auto')._data,
[[1, 1, 0, 0, 1, 1, 0, 0]])
# decimation of multiple stim channels
raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ['stim']))
assert_allclose(raw.resample(8., npad='auto')._data,
[[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0]])
# decimation that could potentially drop events if the decimation is
# done naively
stim = [0, 0, 0, 1, 1, 0, 0, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
assert_allclose(raw.resample(4., npad='auto')._data,
[[0, 1, 1, 0]])
# two events are merged in this case (warning)
stim = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw.resample(8., npad='auto')
assert_true(len(w) == 1)
# events are dropped in this case (warning)
stim = [0, 1, 1, 0, 0, 1, 1, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw.resample(4., npad='auto')
assert_true(len(w) == 1)
# test resampling events: this should no longer give a warning
stim = [0, 1, 1, 0, 0, 1, 1, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
events = find_events(raw)
raw, events = raw.resample(4., events=events, npad='auto')
assert_equal(events, np.array([[0, 0, 1], [2, 0, 1]]))
# test copy flag
stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
raw_resampled = raw.copy().resample(4., npad='auto')
assert_true(raw_resampled is not raw)
raw_resampled = raw.resample(4., npad='auto')
assert_true(raw_resampled is raw)
# resample should still work even when no stim channel is present
raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ['eeg']))
raw.info['lowpass'] = 50.
raw.resample(10, npad='auto')
assert_equal(raw.info['lowpass'], 5.)
assert_equal(len(raw), 10)
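# Why stim channels get special treatment in resample (illustrative sketch,
# plain numpy): naive decimation can silently drop a short trigger pulse,
# which is what the merge/drop warnings tested above guard against.
#
#     stim = np.array([0, 0, 0, 1, 0, 0, 0, 0])
#     stim[::2]  # -> array([0, 0, 0, 0]): the single-sample event vanishes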
@testing.requires_testing_data
def test_hilbert():
"""Test computation of analytic signal using hilbert
"""
raw = Raw(fif_fname, preload=True)
picks_meg = pick_types(raw.info, meg=True, exclude='bads')
picks = picks_meg[:4]
raw_filt = raw.copy()
raw_filt.filter(10, 20)
raw_filt_2 = raw_filt.copy()
raw2 = raw.copy()
raw3 = raw.copy()
raw.apply_hilbert(picks)
raw2.apply_hilbert(picks, envelope=True, n_jobs=2)
# Test custom n_fft
raw_filt.apply_hilbert(picks)
raw_filt_2.apply_hilbert(picks, n_fft=raw_filt_2.n_times + 1000)
assert_equal(raw_filt._data.shape, raw_filt_2._data.shape)
assert_allclose(raw_filt._data[:, 50:-50], raw_filt_2._data[:, 50:-50],
atol=1e-13, rtol=1e-2)
assert_raises(ValueError, raw3.apply_hilbert, picks,
n_fft=raw3.n_times - 100)
env = np.abs(raw._data[picks, :])
assert_allclose(env, raw2._data[picks, :], rtol=1e-2, atol=1e-13)
@testing.requires_testing_data
def test_raw_copy():
"""Test Raw copy
"""
raw = Raw(fif_fname, preload=True)
data, _ = raw[:, :]
copied = raw.copy()
copied_data, _ = copied[:, :]
assert_array_equal(data, copied_data)
assert_equal(sorted(raw.__dict__.keys()),
sorted(copied.__dict__.keys()))
raw = Raw(fif_fname, preload=False)
data, _ = raw[:, :]
copied = raw.copy()
copied_data, _ = copied[:, :]
assert_array_equal(data, copied_data)
assert_equal(sorted(raw.__dict__.keys()),
sorted(copied.__dict__.keys()))
@requires_pandas
def test_to_data_frame():
"""Test raw Pandas exporter"""
raw = Raw(test_fif_fname, preload=True)
_, times = raw[0, :10]
df = raw.to_data_frame()
assert_true((df.columns == raw.ch_names).all())
assert_array_equal(np.round(times * 1e3), df.index.values[:10])
df = raw.to_data_frame(index=None)
assert_true('time' in df.index.names)
assert_array_equal(df.values[:, 0], raw._data[0] * 1e13)
assert_array_equal(df.values[:, 2], raw._data[2] * 1e15)
def test_add_channels():
"""Test raw splitting / re-appending channel types
"""
raw = Raw(test_fif_fname).crop(0, 1, copy=False).load_data()
raw_nopre = Raw(test_fif_fname, preload=False)
raw_eeg_meg = raw.copy().pick_types(meg=True, eeg=True)
raw_eeg = raw.copy().pick_types(meg=False, eeg=True)
raw_meg = raw.copy().pick_types(meg=True, eeg=False)
raw_stim = raw.copy().pick_types(meg=False, eeg=False, stim=True)
raw_new = raw_meg.copy().add_channels([raw_eeg, raw_stim])
assert_true(
all(ch in raw_new.ch_names
for ch in list(raw_stim.ch_names) + list(raw_meg.ch_names))
)
raw_new = raw_meg.copy().add_channels([raw_eeg])
    assert_true(all(ch in raw_new.ch_names for ch in raw_eeg_meg.ch_names))
assert_array_equal(raw_new[:, :][0], raw_eeg_meg[:, :][0])
assert_array_equal(raw_new[:, :][1], raw[:, :][1])
assert_true(all(ch not in raw_new.ch_names for ch in raw_stim.ch_names))
# Testing force updates
raw_arr_info = create_info(['1', '2'], raw_meg.info['sfreq'], 'eeg')
orig_head_t = raw_arr_info['dev_head_t']
raw_arr = np.random.randn(2, raw_eeg.n_times)
raw_arr = RawArray(raw_arr, raw_arr_info)
# This should error because of conflicts in Info
assert_raises(ValueError, raw_meg.copy().add_channels, [raw_arr])
raw_meg.copy().add_channels([raw_arr], force_update_info=True)
# Make sure that values didn't get overwritten
assert_true(raw_arr.info['dev_head_t'] is orig_head_t)
# Now test errors
raw_badsf = raw_eeg.copy()
raw_badsf.info['sfreq'] = 3.1415927
raw_eeg.crop(.5, copy=False)
assert_raises(AssertionError, raw_meg.add_channels, [raw_nopre])
assert_raises(RuntimeError, raw_meg.add_channels, [raw_badsf])
assert_raises(AssertionError, raw_meg.add_channels, [raw_eeg])
assert_raises(ValueError, raw_meg.add_channels, [raw_meg])
assert_raises(AssertionError, raw_meg.add_channels, raw_badsf)
@testing.requires_testing_data
def test_raw_time_as_index():
""" Test time as index conversion"""
raw = Raw(fif_fname, preload=True)
with warnings.catch_warnings(record=True): # deprecation
first_samp = raw.time_as_index([0], True)[0]
assert_equal(raw.first_samp, -first_samp)
@testing.requires_testing_data
def test_save():
""" Test saving raw"""
tempdir = _TempDir()
raw = Raw(fif_fname, preload=False)
# can't write over file being read
assert_raises(ValueError, raw.save, fif_fname)
raw = Raw(fif_fname, preload=True)
# can't overwrite file without overwrite=True
assert_raises(IOError, raw.save, fif_fname)
# test abspath support and annotations
annot = Annotations([10], [10], ['test'], raw.info['meas_date'])
raw.annotations = annot
new_fname = op.join(op.abspath(op.curdir), 'break-raw.fif')
raw.save(op.join(tempdir, new_fname), overwrite=True)
new_raw = Raw(op.join(tempdir, new_fname), preload=False)
assert_raises(ValueError, new_raw.save, new_fname)
assert_array_equal(annot.onset, new_raw.annotations.onset)
assert_array_equal(annot.duration, new_raw.annotations.duration)
assert_array_equal(annot.description, new_raw.annotations.description)
assert_equal(annot.orig_time, new_raw.annotations.orig_time)
# make sure we can overwrite the file we loaded when preload=True
new_raw = Raw(op.join(tempdir, new_fname), preload=True)
new_raw.save(op.join(tempdir, new_fname), overwrite=True)
os.remove(new_fname)
@testing.requires_testing_data
def test_with_statement():
""" Test with statement """
for preload in [True, False]:
with Raw(fif_fname, preload=preload) as raw_:
print(raw_)
def test_compensation_raw():
"""Test Raw compensation
"""
tempdir = _TempDir()
raw1 = Raw(ctf_comp_fname, compensation=None)
assert_true(raw1.comp is None)
data1, times1 = raw1[:, :]
raw2 = Raw(ctf_comp_fname, compensation=3)
data2, times2 = raw2[:, :]
assert_true(raw2.comp is None) # unchanged (data come with grade 3)
assert_array_equal(times1, times2)
assert_array_equal(data1, data2)
raw3 = Raw(ctf_comp_fname, compensation=1)
data3, times3 = raw3[:, :]
assert_true(raw3.comp is not None)
assert_array_equal(times1, times3)
# make sure it's different with a different compensation:
assert_true(np.mean(np.abs(data1 - data3)) > 1e-12)
assert_raises(ValueError, Raw, ctf_comp_fname, compensation=33)
# Try IO with compensation
temp_file = op.join(tempdir, 'raw.fif')
raw1.save(temp_file, overwrite=True)
raw4 = Raw(temp_file)
data4, times4 = raw4[:, :]
assert_array_equal(times1, times4)
assert_array_equal(data1, data4)
# Now save the file that has modified compensation
    # and make sure we get the same data as the input, i.e. the compensation
    # is undone
raw3.save(temp_file, overwrite=True)
raw5 = Raw(temp_file)
data5, times5 = raw5[:, :]
assert_array_equal(times1, times5)
assert_allclose(data1, data5, rtol=1e-12, atol=1e-22)
@requires_mne
def test_compensation_raw_mne():
"""Test Raw compensation by comparing with MNE
"""
tempdir = _TempDir()
def compensate_mne(fname, grad):
tmp_fname = op.join(tempdir, 'mne_ctf_test_raw.fif')
cmd = ['mne_process_raw', '--raw', fname, '--save', tmp_fname,
'--grad', str(grad), '--projoff', '--filteroff']
run_subprocess(cmd)
return Raw(tmp_fname, preload=True)
for grad in [0, 2, 3]:
raw_py = Raw(ctf_comp_fname, preload=True, compensation=grad)
raw_c = compensate_mne(ctf_comp_fname, grad)
assert_allclose(raw_py._data, raw_c._data, rtol=1e-6, atol=1e-17)
@testing.requires_testing_data
def test_drop_channels_mixin():
"""Test channels-dropping functionality
"""
raw = Raw(fif_fname, preload=True)
drop_ch = raw.ch_names[:3]
ch_names = raw.ch_names[3:]
ch_names_orig = raw.ch_names
dummy = raw.copy().drop_channels(drop_ch)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, raw.ch_names)
assert_equal(len(ch_names_orig), raw._data.shape[0])
raw.drop_channels(drop_ch)
assert_equal(ch_names, raw.ch_names)
assert_equal(len(ch_names), len(raw._cals))
assert_equal(len(ch_names), raw._data.shape[0])
@testing.requires_testing_data
def test_pick_channels_mixin():
"""Test channel-picking functionality
"""
# preload is True
raw = Raw(fif_fname, preload=True)
ch_names = raw.ch_names[:3]
ch_names_orig = raw.ch_names
dummy = raw.copy().pick_channels(ch_names)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, raw.ch_names)
assert_equal(len(ch_names_orig), raw._data.shape[0])
raw.pick_channels(ch_names) # copy is False
assert_equal(ch_names, raw.ch_names)
assert_equal(len(ch_names), len(raw._cals))
assert_equal(len(ch_names), raw._data.shape[0])
assert_raises(ValueError, raw.pick_channels, ch_names[0])
raw = Raw(fif_fname, preload=False)
assert_raises(RuntimeError, raw.pick_channels, ch_names)
assert_raises(RuntimeError, raw.drop_channels, ch_names)
@testing.requires_testing_data
def test_equalize_channels():
"""Test equalization of channels
"""
raw1 = Raw(fif_fname, preload=True)
raw2 = raw1.copy()
ch_names = raw1.ch_names[2:]
raw1.drop_channels(raw1.ch_names[:1])
raw2.drop_channels(raw2.ch_names[1:2])
my_comparison = [raw1, raw2]
equalize_channels(my_comparison)
for e in my_comparison:
assert_equal(ch_names, e.ch_names)
run_tests_if_main()
|
bsd-3-clause
|
ldirer/scikit-learn
|
sklearn/tests/test_multioutput.py
|
4
|
12671
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier
from sklearn.linear_model import Lasso
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import SGDRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier
def test_multi_target_regression():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
references = np.zeros_like(y_test)
for n in range(3):
rgr = GradientBoostingRegressor(random_state=0)
rgr.fit(X_train, y_train[:, n])
references[:, n] = rgr.predict(X_test)
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X_train, y_train)
y_pred = rgr.predict(X_test)
assert_almost_equal(references, y_pred)
def test_multi_target_regression_partial_fit():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
references = np.zeros_like(y_test)
half_index = 25
for n in range(3):
sgr = SGDRegressor(random_state=0)
sgr.partial_fit(X_train[:half_index], y_train[:half_index, n])
sgr.partial_fit(X_train[half_index:], y_train[half_index:, n])
references[:, n] = sgr.predict(X_test)
sgr = MultiOutputRegressor(SGDRegressor(random_state=0))
sgr.partial_fit(X_train[:half_index], y_train[:half_index])
sgr.partial_fit(X_train[half_index:], y_train[half_index:])
y_pred = sgr.predict(X_test)
assert_almost_equal(references, y_pred)
assert_false(hasattr(MultiOutputRegressor(Lasso), 'partial_fit'))
def test_multi_target_regression_one_target():
    # Test that multi-target regression raises an error when y has a single target
X, y = datasets.make_regression(n_targets=1)
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
assert_raises(ValueError, rgr.fit, X, y)
def test_multi_target_sparse_regression():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test = X[50:]
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
rgr = MultiOutputRegressor(Lasso(random_state=0))
rgr_sparse = MultiOutputRegressor(Lasso(random_state=0))
rgr.fit(X_train, y_train)
rgr_sparse.fit(sparse(X_train), y_train)
assert_almost_equal(rgr.predict(X_test),
rgr_sparse.predict(sparse(X_test)))
def test_multi_target_sample_weights_api():
X = [[1, 2, 3], [4, 5, 6]]
y = [[3.141, 2.718], [2.718, 3.141]]
w = [0.8, 0.6]
rgr = MultiOutputRegressor(Lasso())
assert_raises_regex(ValueError, "does not support sample weights",
rgr.fit, X, y, w)
# no exception should be raised if the base estimator supports weights
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X, y, w)
def test_multi_target_sample_weight_partial_fit():
# weighted regressor
X = [[1, 2, 3], [4, 5, 6]]
y = [[3.141, 2.718], [2.718, 3.141]]
w = [2., 1.]
rgr_w = MultiOutputRegressor(SGDRegressor(random_state=0))
rgr_w.partial_fit(X, y, w)
# weighted with different weights
w = [2., 2.]
rgr = MultiOutputRegressor(SGDRegressor(random_state=0))
rgr.partial_fit(X, y, w)
assert_not_equal(rgr.predict(X)[0][0], rgr_w.predict(X)[0][0])
def test_multi_target_sample_weights():
# weighted regressor
Xw = [[1, 2, 3], [4, 5, 6]]
yw = [[3.141, 2.718], [2.718, 3.141]]
w = [2., 1.]
rgr_w = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
y = [[3.141, 2.718], [3.141, 2.718], [2.718, 3.141]]
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X, y)
X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
assert_almost_equal(rgr.predict(X_test), rgr_w.predict(X_test))
# Import the data
iris = datasets.load_iris()
# create multiple targets by randomized shuffling and concatenating y.
X = iris.data
y1 = iris.target
y2 = shuffle(y1, random_state=1)
y3 = shuffle(y1, random_state=2)
y = np.column_stack((y1, y2, y3))
n_samples, n_features = X.shape
n_outputs = y.shape[1]
n_classes = len(np.unique(y1))
classes = list(map(np.unique, (y1, y2, y3)))
def test_multi_output_classification_partial_fit_parallelism():
sgd_linear_clf = SGDClassifier(loss='log', random_state=1)
mor = MultiOutputClassifier(sgd_linear_clf, n_jobs=-1)
mor.partial_fit(X, y, classes)
est1 = mor.estimators_[0]
mor.partial_fit(X, y)
est2 = mor.estimators_[0]
# parallelism requires this to be the case for a sane implementation
assert_false(est1 is est2)
def test_multi_output_classification_partial_fit():
# test if multi_target initializes correctly with base estimator and fit
# assert predictions work as expected for predict
sgd_linear_clf = SGDClassifier(loss='log', random_state=1)
multi_target_linear = MultiOutputClassifier(sgd_linear_clf)
# train the multi_target_linear and also get the predictions.
half_index = X.shape[0] // 2
multi_target_linear.partial_fit(
X[:half_index], y[:half_index], classes=classes)
first_predictions = multi_target_linear.predict(X)
assert_equal((n_samples, n_outputs), first_predictions.shape)
multi_target_linear.partial_fit(X[half_index:], y[half_index:])
second_predictions = multi_target_linear.predict(X)
assert_equal((n_samples, n_outputs), second_predictions.shape)
# train the linear classification with each column and assert that
# predictions are equal after first partial_fit and second partial_fit
for i in range(3):
# create a clone with the same state
sgd_linear_clf = clone(sgd_linear_clf)
sgd_linear_clf.partial_fit(
X[:half_index], y[:half_index, i], classes=classes[i])
assert_array_equal(sgd_linear_clf.predict(X), first_predictions[:, i])
sgd_linear_clf.partial_fit(X[half_index:], y[half_index:, i])
assert_array_equal(sgd_linear_clf.predict(X), second_predictions[:, i])
def test_multi_output_classification_partial_fit_no_first_classes_exception():
sgd_linear_clf = SGDClassifier(loss='log', random_state=1)
multi_target_linear = MultiOutputClassifier(sgd_linear_clf)
assert_raises_regex(ValueError, "classes must be passed on the first call "
"to partial_fit.",
multi_target_linear.partial_fit, X, y)
def test_multi_output_classification():
# test if multi_target initializes correctly with base estimator and fit
    # assert predictions work as expected for predict, predict_proba and score
forest = RandomForestClassifier(n_estimators=10, random_state=1)
multi_target_forest = MultiOutputClassifier(forest)
# train the multi_target_forest and also get the predictions.
multi_target_forest.fit(X, y)
predictions = multi_target_forest.predict(X)
assert_equal((n_samples, n_outputs), predictions.shape)
predict_proba = multi_target_forest.predict_proba(X)
assert len(predict_proba) == n_outputs
for class_probabilities in predict_proba:
assert_equal((n_samples, n_classes), class_probabilities.shape)
assert_array_equal(np.argmax(np.dstack(predict_proba), axis=1),
predictions)
# train the forest with each column and assert that predictions are equal
for i in range(3):
forest_ = clone(forest) # create a clone with the same state
forest_.fit(X, y[:, i])
assert_equal(list(forest_.predict(X)), list(predictions[:, i]))
assert_array_equal(list(forest_.predict_proba(X)),
list(predict_proba[i]))
def test_multiclass_multioutput_estimator():
# test to check meta of meta estimators
svc = LinearSVC(random_state=0)
multi_class_svc = OneVsRestClassifier(svc)
multi_target_svc = MultiOutputClassifier(multi_class_svc)
multi_target_svc.fit(X, y)
predictions = multi_target_svc.predict(X)
assert_equal((n_samples, n_outputs), predictions.shape)
    # train the multiclass SVC with each column and assert that predictions are equal
for i in range(3):
multi_class_svc_ = clone(multi_class_svc) # create a clone
multi_class_svc_.fit(X, y[:, i])
assert_equal(list(multi_class_svc_.predict(X)),
list(predictions[:, i]))
def test_multiclass_multioutput_estimator_predict_proba():
seed = 542
# make test deterministic
rng = np.random.RandomState(seed)
# random features
X = rng.normal(size=(5, 5))
# random labels
y1 = np.array(['b', 'a', 'a', 'b', 'a']).reshape(5, 1) # 2 classes
y2 = np.array(['d', 'e', 'f', 'e', 'd']).reshape(5, 1) # 3 classes
Y = np.concatenate([y1, y2], axis=1)
clf = MultiOutputClassifier(LogisticRegression(random_state=seed))
clf.fit(X, Y)
y_result = clf.predict_proba(X)
y_actual = [np.array([[0.23481764, 0.76518236],
[0.67196072, 0.32803928],
[0.54681448, 0.45318552],
[0.34883923, 0.65116077],
[0.73687069, 0.26312931]]),
np.array([[0.5171785, 0.23878628, 0.24403522],
[0.22141451, 0.64102704, 0.13755846],
[0.16751315, 0.18256843, 0.64991843],
[0.27357372, 0.55201592, 0.17441036],
[0.65745193, 0.26062899, 0.08191907]])]
for i in range(len(y_actual)):
assert_almost_equal(y_result[i], y_actual[i])
def test_multi_output_classification_sample_weights():
# weighted classifier
Xw = [[1, 2, 3], [4, 5, 6]]
yw = [[3, 2], [2, 3]]
w = np.asarray([2., 1.])
forest = RandomForestClassifier(n_estimators=10, random_state=1)
clf_w = MultiOutputClassifier(forest)
clf_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
y = [[3, 2], [3, 2], [2, 3]]
forest = RandomForestClassifier(n_estimators=10, random_state=1)
clf = MultiOutputClassifier(forest)
clf.fit(X, y)
X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
assert_almost_equal(clf.predict(X_test), clf_w.predict(X_test))
def test_multi_output_classification_partial_fit_sample_weights():
# weighted classifier
Xw = [[1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]]
yw = [[3, 2], [2, 3], [3, 2]]
w = np.asarray([2., 1., 1.])
sgd_linear_clf = SGDClassifier(random_state=1)
clf_w = MultiOutputClassifier(sgd_linear_clf)
clf_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1, 2, 3], [1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]]
y = [[3, 2], [3, 2], [2, 3], [3, 2]]
sgd_linear_clf = SGDClassifier(random_state=1)
clf = MultiOutputClassifier(sgd_linear_clf)
clf.fit(X, y)
X_test = [[1.5, 2.5, 3.5]]
assert_array_almost_equal(clf.predict(X_test), clf_w.predict(X_test))
def test_multi_output_exceptions():
# NotFittedError when fit is not done but score, predict and
    # predict_proba are called
moc = MultiOutputClassifier(LinearSVC(random_state=0))
assert_raises(NotFittedError, moc.predict, y)
assert_raises(NotFittedError, moc.predict_proba, y)
assert_raises(NotFittedError, moc.score, X, y)
# ValueError when number of outputs is different
# for fit and score
y_new = np.column_stack((y1, y2))
moc.fit(X, y)
assert_raises(ValueError, moc.score, X, y_new)
# ValueError when y is continuous
assert_raise_message(ValueError, "Unknown label type", moc.fit, X, X[:, 1])
|
bsd-3-clause
|
victorbergelin/scikit-learn
|
examples/linear_model/plot_ridge_path.py
|
254
|
1655
|
"""
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares solution, the
coefficients exhibit large oscillations due to the ill-conditioning
(collinearity) of the Hilbert matrix used as the design matrix.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
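###############################################################################
# Sanity sketch (illustrative): with ``fit_intercept=False``, ``Ridge`` solves
# the regularized normal equations
#     w(alpha) = (X^T X + alpha * I)^{-1} X^T y
# so the last point of the path can be reproduced directly:
coef_closed_form = np.linalg.solve(X.T.dot(X) + alphas[-1] * np.eye(X.shape[1]),
                                   X.T.dot(y))
print("max |Ridge - closed form| at alpha=%g: %.2e"
      % (alphas[-1], np.max(np.abs(coefs[-1] - coef_closed_form))))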
|
bsd-3-clause
|
cbertinato/pandas
|
pandas/tests/frame/test_alter_axes.py
|
1
|
60751
|
from datetime import datetime, timedelta
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype, is_interval_dtype, is_object_dtype)
from pandas import (
Categorical, DataFrame, DatetimeIndex, Index, IntervalIndex, MultiIndex,
RangeIndex, Series, Timestamp, cut, date_range, to_datetime)
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match='Length mismatch'):
df.index = idx[::2]
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match='Length mismatch'):
df.set_index(idx[::2])
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame({'A': [1.1, 2.2, 3.3], 'B': [5.0, 6.1, 7.2]},
index=[2010, 2011, 2012])
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize('keys', ['A', 'C', ['A', 'B'],
('tuple', 'as', 'label')])
@pytest.mark.parametrize('inplace', [True, False])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols,
drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
result.set_index(keys, drop=drop, inplace=True)
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize('keys', ['A', 'C', ['A', 'B'],
('tuple', 'as', 'label')])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays([df.index] + [df[x] for x in keys],
names=[None] + keys)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize('keys', ['A', 'C', ['A', 'B'],
('tuple', 'as', 'label')])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols,
drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(['D'], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(['D'] + keys,
drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
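    # Quick sketch of the ``append`` semantics exercised above (illustrative,
    # hypothetical frame): the existing index is kept and the key becomes an
    # additional MultiIndex level, e.g.
    #
    #     df = DataFrame({'A': [1, 2], 'B': ['x', 'y']})
    #     list(df.set_index('A', append=True).index.names) == [None, 'A']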
def test_set_index_after_mutation(self):
# GH1590
df = DataFrame({'val': [0, 1, 2], 'key': ['a', 'b', 'c']})
expected = DataFrame({'val': [1, 2]},
Index(['b', 'c'], name='key'))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index('key')
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize('box', [Series, Index, np.array,
list, lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x])])
@pytest.mark.parametrize('append, index_name', [(True, None),
(True, 'B'), (True, 'test'), (False, None)])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_pass_single_array(self, frame_of_index_cols,
drop, append, index_name, box):
df = frame_of_index_cols
df.index.name = index_name
key = box(df['B'])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, 'names', None)
name = [getattr(key, 'name', None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(['B'], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize('box', [Series, Index, np.array, list,
lambda x: MultiIndex.from_arrays([x])])
@pytest.mark.parametrize('append, index_name',
[(True, None), (True, 'A'), (True, 'B'),
(True, 'test'), (False, None)])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_pass_arrays(self, frame_of_index_cols,
drop, append, index_name, box):
df = frame_of_index_cols
df.index.name = index_name
keys = ['A', box(df['B'])]
# np.array/list "forget" the name of B
names = ['A', None if box in [np.array, list, tuple, iter] else 'B']
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(['A', 'B'], drop=False, append=append)
expected = expected.drop('A', axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize('box2', [Series, Index, np.array, list, iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name])
@pytest.mark.parametrize('box1', [Series, Index, np.array, list, iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name])
@pytest.mark.parametrize('append, index_name', [(True, None),
(True, 'A'), (True, 'test'), (False, None)])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_pass_arrays_duplicate(self, frame_of_index_cols, drop,
append, index_name, box1, box2):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df['A']), box2(df['A'])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df['A']), box2(df['A'])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# use "is" because == would give ambiguous Boolean error for containers
first_drop = False if (
keys[0] is 'A' and keys[1] is 'A') else drop # noqa: F632
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('append', [True, False])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols,
drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df['A'], df['B']], names=['A', 'B'])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(['A', 'B'], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with pytest.raises(ValueError, match='Index has duplicate keys'):
df.set_index('A', verify_integrity=True)
# with MultiIndex
with pytest.raises(ValueError, match='Index has duplicate keys'):
df.set_index([df['A'], df['A']], verify_integrity=True)
@pytest.mark.parametrize('append', [True, False])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_raise_keys(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(['foo', 'bar', 'baz'], drop=drop, append=append)
# non-existent key in list with arrays
with pytest.raises(KeyError, match='X'):
df.set_index([df['A'], df['B'], 'X'], drop=drop, append=append)
msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]"
# tuples always raise KeyError
with pytest.raises(KeyError, match=msg):
df.set_index(tuple(df['A']), drop=drop, append=append)
# also within a list
with pytest.raises(KeyError, match=msg):
df.set_index(['A', df['A'], tuple(df['A'])],
drop=drop, append=append)
@pytest.mark.parametrize('append', [True, False])
@pytest.mark.parametrize('drop', [True, False])
@pytest.mark.parametrize('box', [set], ids=['set'])
def test_set_index_raise_on_type(self, frame_of_index_cols, box,
drop, append):
df = frame_of_index_cols
msg = 'The parameter "keys" may be a column key, .*'
# forbidden type, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(box(df['A']), drop=drop, append=append)
# forbidden type in list, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(['A', df['A'], box(df['A'])],
drop=drop, append=append)
# MultiIndex constructor does not work directly on Series -> lambda
@pytest.mark.parametrize('box', [Series, Index, np.array, iter,
lambda x: MultiIndex.from_arrays([x])],
ids=['Series', 'Index', 'np.array',
'iter', 'MultiIndex'])
@pytest.mark.parametrize('length', [4, 6], ids=['too_short', 'too_long'])
@pytest.mark.parametrize('append', [True, False])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_raise_on_len(self, frame_of_index_cols, box, length,
drop, append):
# GH 24984
df = frame_of_index_cols # has length 5
values = np.random.randint(0, 10, (length,))
msg = 'Length mismatch: Expected 5 rows, received array of length.*'
# wrong length directly
with pytest.raises(ValueError, match=msg):
df.set_index(box(values), drop=drop, append=append)
# wrong length in list
with pytest.raises(ValueError, match=msg):
df.set_index(['A', df.A, box(values)], drop=drop, append=append)
def test_set_index_custom_label_type(self):
# GH 24969
class Thing:
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing %r>" % (self.name,)
# necessary for pretty KeyError
__repr__ = __str__
thing1 = Thing('One', 'red')
thing2 = Thing('Two', 'blue')
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]},
index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing('Three', 'pink')
msg = "<Thing 'Three'>"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_hashable_iterable(self):
# GH 24969
# actual example discussed in GH 24984 was e.g. for shapely.geometry
# objects (e.g. a collection of Points) that can be both hashable and
# iterable; using frozenset as a stand-in for testing here
class Thing(frozenset):
# need to stabilize repr for KeyError (due to random order in sets)
def __repr__(self):
tmp = sorted(list(self))
# double curly brace prints one brace in format string
return "frozenset({{{}}})".format(', '.join(map(repr, tmp)))
thing1 = Thing(['One', 'red'])
thing2 = Thing(['Two', 'blue'])
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]},
index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing(['Three', 'pink'])
msg = r"frozenset\(\{'Three', 'pink'\}\)"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_type_raises(self):
# GH 24969
# purposefully inherit from something unhashable
class Thing(set):
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing %r>" % (self.name,)
thing1 = Thing('One', 'red')
thing2 = Thing('Two', 'blue')
df = DataFrame([[0, 2], [1, 3]], columns=[thing1, thing2])
msg = 'The parameter "keys" may be a column key, .*'
with pytest.raises(TypeError, match=msg):
# use custom label directly
df.set_index(thing2)
with pytest.raises(TypeError, match=msg):
# custom label wrapped in list
df.set_index([thing2])
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
ci.name = 'B'
# with Categorical
df = DataFrame({'A': np.random.randn(10),
'B': ci.values})
idf = df.set_index('B')
tm.assert_index_equal(idf.index, ci)
# from a CategoricalIndex
df = DataFrame({'A': np.random.randn(10),
'B': ci})
idf = df.set_index('B')
tm.assert_index_equal(idf.index, ci)
# round-trip
idf = idf.reset_index().set_index('B')
tm.assert_index_equal(idf.index, ci)
def test_set_index_cast_datetimeindex(self):
df = DataFrame({'A': [datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)],
'B': np.random.randn(1000)})
idf = df.set_index('A')
assert isinstance(idf.index, DatetimeIndex)
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(to_datetime(['2013-1-1 13:00',
'2013-1-2 14:00']),
name='B').tz_localize('US/Pacific')
df = DataFrame(np.random.randn(2, 1), columns=['A'])
expected = Series(np.array([Timestamp('2013-01-01 13:00:00-0800',
tz='US/Pacific'),
Timestamp('2013-01-02 14:00:00-0800',
tz='US/Pacific')],
dtype="object"), name='B')
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df['B'] = idx
result = df['B']
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
# convert to utc
with tm.assert_produces_warning(FutureWarning):
df['B'] = idx.to_series(keep_tz=False, index=[0, 1])
result = df['B']
comp = Series(DatetimeIndex(expected.values).tz_localize(None),
name='B')
tm.assert_series_equal(result, comp)
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = ("The default of the 'keep_tz' keyword in "
"DatetimeIndex.to_series will change to True in a future "
"release.")
assert msg in str(m[0].message)
with tm.assert_produces_warning(FutureWarning):
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
# list of datetimes with a tz
df['B'] = idx.to_pydatetime()
result = df['B']
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame(
[{'ts': datetime(2014, 4, 1, tzinfo=pytz.utc), 'foo': 1}])
expected = df.set_index('ts')
df.index = df['ts']
df.pop('ts')
tm.assert_frame_equal(df, expected)
def test_reset_index_tz(self, tz_aware_fixture):
# GH 3950
# reset_index with single level
tz = tz_aware_fixture
idx = date_range('1/1/2011', periods=5,
freq='D', tz=tz, name='idx')
df = DataFrame({'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']},
index=idx)
expected = DataFrame({'idx': [datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5)],
'a': range(5),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx', 'a', 'b'])
expected['idx'] = expected['idx'].apply(lambda d: Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
idx = to_datetime(["2014-01-01 10:10:10"],
utc=True).tz_convert('Europe/Rome')
df = DataFrame({'A': idx})
assert df.set_index(idx).index[0].hour == 11
assert DatetimeIndex(Series(df.A))[0].hour == 11
assert df.set_index(df.A).index[0].hour == 11
def test_set_index_dst(self):
di = date_range('2006-10-29 00:00:00', periods=3,
freq='H', tz='US/Pacific')
df = DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]},
index=di).reset_index()
# single level
res = df.set_index('index')
exp = DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]},
index=Index(di, name='index'))
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(['index', 'a'])
exp_index = MultiIndex.from_arrays([di, [0, 1, 2]],
names=['index', 'a'])
exp = DataFrame({'b': [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_reset_index_with_intervals(self):
idx = IntervalIndex.from_breaks(np.arange(11), name='x')
original = DataFrame({'x': idx, 'y': np.arange(10)})[['x', 'y']]
result = original.set_index('x')
expected = DataFrame({'y': np.arange(10)}, index=idx)
tm.assert_frame_equal(result, expected)
result2 = result.reset_index()
tm.assert_frame_equal(result2, original)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
result = df.set_index(df.columns[0])
expected = df.iloc[:, 1:]
expected.index = df.iloc[:, 0].values
expected.index.names = [df.columns[0]]
tm.assert_frame_equal(result, expected)
def test_set_index_empty_column(self):
# GH 1971
df = DataFrame([
{'a': 1, 'p': 0},
{'a': 2, 'm': 10},
{'a': 3, 'm': 11, 'p': 20},
{'a': 4, 'm': 12, 'p': 21}
], columns=('a', 'm', 'p', 'x'))
result = df.set_index(['a', 'x'])
expected = df[['m', 'p']]
expected.index = MultiIndex.from_arrays([df['a'], df['x']],
names=['a', 'x'])
tm.assert_frame_equal(result, expected)
def test_set_columns(self, float_string_frame):
cols = Index(np.arange(len(float_string_frame.columns)))
float_string_frame.columns = cols
with pytest.raises(ValueError, match='Length mismatch'):
float_string_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
# GH 11314
# with tz
index = date_range(datetime(2015, 10, 1),
datetime(2015, 10, 1, 23),
freq='H', tz='US/Eastern')
df = DataFrame(np.random.randn(24, 1), columns=['a'], index=index)
new_index = date_range(datetime(2015, 10, 2),
datetime(2015, 10, 2, 23),
freq='H', tz='US/Eastern')
result = df.set_index(new_index)
assert result.index.freq == index.freq
# Renaming
def test_rename(self, float_frame):
mapping = {
'A': 'a',
'B': 'b',
'C': 'c',
'D': 'd'
}
renamed = float_frame.rename(columns=mapping)
renamed2 = float_frame.rename(columns=str.lower)
tm.assert_frame_equal(renamed, renamed2)
tm.assert_frame_equal(renamed2.rename(columns=str.upper),
float_frame, check_names=False)
# index
data = {
'A': {'foo': 0, 'bar': 1}
}
        # gets sorted alphabetically
df = DataFrame(data)
renamed = df.rename(index={'foo': 'bar', 'bar': 'foo'})
tm.assert_index_equal(renamed.index, Index(['foo', 'bar']))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, Index(['BAR', 'FOO']))
# have to pass something
with pytest.raises(TypeError, match="must pass an index to rename"):
float_frame.rename()
# partial columns
renamed = float_frame.rename(columns={'C': 'foo', 'D': 'bar'})
tm.assert_index_equal(renamed.columns, Index(['A', 'B', 'foo', 'bar']))
# other axis
renamed = float_frame.T.rename(index={'C': 'foo', 'D': 'bar'})
tm.assert_index_equal(renamed.index, Index(['A', 'B', 'foo', 'bar']))
# index with name
index = Index(['foo', 'bar'], name='name')
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={'foo': 'bar', 'bar': 'foo'})
tm.assert_index_equal(renamed.index,
Index(['bar', 'foo'], name='name'))
assert renamed.index.name == renamer.index.name
def test_rename_axis_inplace(self, float_frame):
# GH 15704
expected = float_frame.rename_axis('foo')
result = float_frame.copy()
no_return = result.rename_axis('foo', inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
expected = float_frame.rename_axis('bar', axis=1)
result = float_frame.copy()
no_return = result.rename_axis('bar', axis=1, inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
def test_rename_axis_warns(self):
# https://github.com/pandas-dev/pandas/issues/17833
df = DataFrame({"A": [1, 2], "B": [1, 2]})
with tm.assert_produces_warning(FutureWarning) as w:
df.rename_axis(id, axis=0)
assert 'rename' in str(w[0].message)
with tm.assert_produces_warning(FutureWarning) as w:
df.rename_axis({0: 10, 1: 20}, axis=0)
assert 'rename' in str(w[0].message)
with tm.assert_produces_warning(FutureWarning) as w:
df.rename_axis(id, axis=1)
assert 'rename' in str(w[0].message)
with tm.assert_produces_warning(FutureWarning) as w:
df['A'].rename_axis(id)
assert 'rename' in str(w[0].message)
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([['a', 'b', 'c'], [1, 2]],
names=['ll', 'nn'])
df = DataFrame({'x': [i for i in range(len(mi))],
'y': [i * 10 for i in range(len(mi))]},
index=mi)
# Test for rename of the Index object of columns
result = df.rename_axis('cols', axis=1)
tm.assert_index_equal(result.columns,
Index(['x', 'y'], name='cols'))
# Test for rename of the Index object of columns using dict
result = result.rename_axis(columns={'cols': 'new'}, axis=1)
tm.assert_index_equal(result.columns,
Index(['x', 'y'], name='new'))
# Test for renaming index using dict
result = df.rename_axis(index={'ll': 'foo'})
assert result.index.names == ['foo', 'nn']
# Test for renaming index using a function
result = df.rename_axis(index=str.upper, axis=0)
assert result.index.names == ['LL', 'NN']
# Test for renaming index providing complete list
result = df.rename_axis(index=['foo', 'goo'])
assert result.index.names == ['foo', 'goo']
# Test for changing index and columns at same time
sdf = df.reset_index().set_index('nn').drop(columns=['ll', 'y'])
result = sdf.rename_axis(index='foo', columns='meh')
assert result.index.name == 'foo'
assert result.columns.name == 'meh'
# Test different error cases
with pytest.raises(TypeError, match='Must pass'):
df.rename_axis(index='wrong')
with pytest.raises(ValueError, match='Length of names'):
df.rename_axis(index=['wrong'])
with pytest.raises(TypeError, match='bogus'):
df.rename_axis(bogus=None)
@pytest.mark.parametrize('kwargs, rename_index, rename_columns', [
({'mapper': None, 'axis': 0}, True, False),
({'mapper': None, 'axis': 1}, False, True),
({'index': None}, True, False),
({'columns': None}, False, True),
({'index': None, 'columns': None}, True, True),
({}, False, False)])
def test_rename_axis_none(self, kwargs, rename_index, rename_columns):
# GH 25034
index = Index(list('abc'), name='foo')
columns = Index(['col1', 'col2'], name='bar')
data = np.arange(6).reshape(3, 2)
df = DataFrame(data, index, columns)
result = df.rename_axis(**kwargs)
expected_index = index.rename(None) if rename_index else index
expected_columns = columns.rename(None) if rename_columns else columns
expected = DataFrame(data, expected_index, expected_columns)
tm.assert_frame_equal(result, expected)
def test_rename_multiindex(self):
tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')]
tuples_columns = [('fizz1', 'buzz1'), ('fizz2', 'buzz2')]
index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar'])
columns = MultiIndex.from_tuples(
tuples_columns, names=['fizz', 'buzz'])
df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},
columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'})
new_index = MultiIndex.from_tuples([('foo3', 'bar1'),
('foo2', 'bar3')],
names=['foo', 'bar'])
new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'),
('fizz2', 'buzz3')],
names=['fizz', 'buzz'])
tm.assert_index_equal(renamed.index, new_index)
tm.assert_index_equal(renamed.columns, new_columns)
assert renamed.index.names == df.index.names
assert renamed.columns.names == df.columns.names
#
# with specifying a level (GH13766)
# dict
new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'),
('fizz2', 'buzz2')],
names=['fizz', 'buzz'])
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level='fizz')
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples([('fizz1', 'buzz1'),
('fizz2', 'buzz3')],
names=['fizz', 'buzz'])
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level='buzz')
tm.assert_index_equal(renamed.columns, new_columns)
# function
func = str.upper
new_columns = MultiIndex.from_tuples([('FIZZ1', 'buzz1'),
('FIZZ2', 'buzz2')],
names=['fizz', 'buzz'])
renamed = df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level='fizz')
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples([('fizz1', 'BUZZ1'),
('fizz2', 'BUZZ2')],
names=['fizz', 'buzz'])
renamed = df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level='buzz')
tm.assert_index_equal(renamed.columns, new_columns)
# index
new_index = MultiIndex.from_tuples([('foo3', 'bar1'),
('foo2', 'bar2')],
names=['foo', 'bar'])
renamed = df.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},
level=0)
tm.assert_index_equal(renamed.index, new_index)
def test_rename_nocopy(self, float_frame):
renamed = float_frame.rename(columns={'C': 'foo'}, copy=False)
renamed['foo'] = 1.
assert (float_frame['C'] == 1.).all()
def test_rename_inplace(self, float_frame):
float_frame.rename(columns={'C': 'foo'})
assert 'C' in float_frame
assert 'foo' not in float_frame
c_id = id(float_frame['C'])
float_frame = float_frame.copy()
float_frame.rename(columns={'C': 'foo'}, inplace=True)
assert 'C' not in float_frame
assert 'foo' in float_frame
assert id(float_frame['foo']) != c_id
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({0: ['foo', 'bar'], 1: ['bah', 'bas'], 2: [1, 2]})
df = df.rename(columns={0: 'a'})
df = df.rename(columns={1: 'b'})
df = df.set_index(['a', 'b'])
df.columns = ['2001-01-01']
expected = DataFrame([[1], [2]],
index=MultiIndex.from_tuples(
[('foo', 'bah'), ('bar', 'bas')],
names=['a', 'b']),
columns=['2001-01-01'])
tm.assert_frame_equal(df, expected)
def test_rename_bug2(self):
# GH 19497
# rename was changing Index to MultiIndex if Index contained tuples
df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)],
columns=["a"])
df = df.rename({(1, 1): (5, 4)}, axis="index")
expected = DataFrame(data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)],
columns=["a"])
tm.assert_frame_equal(df, expected)
def test_rename_errors_raises(self):
df = DataFrame(columns=['A', 'B', 'C', 'D'])
with pytest.raises(KeyError, match='\'E\'] not found in axis'):
df.rename(columns={'A': 'a', 'E': 'e'}, errors='raise')
@pytest.mark.parametrize('mapper, errors, expected_columns', [
({'A': 'a', 'E': 'e'}, 'ignore', ['a', 'B', 'C', 'D']),
({'A': 'a'}, 'raise', ['a', 'B', 'C', 'D']),
(str.lower, 'raise', ['a', 'b', 'c', 'd'])])
def test_rename_errors(self, mapper, errors, expected_columns):
# GH 13473
# rename now works with errors parameter
df = DataFrame(columns=['A', 'B', 'C', 'D'])
result = df.rename(columns=mapper, errors=errors)
expected = DataFrame(columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
df = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=index)
# no change, position
result = df.reorder_levels([0, 1, 2])
tm.assert_frame_equal(df, result)
# no change, labels
result = df.reorder_levels(['L0', 'L1', 'L2'])
tm.assert_frame_equal(df, result)
# rotate, position
result = df.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
codes=[[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels([0, 0, 0])
e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
codes=[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
names=['L0', 'L0', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels(['L0', 'L0', 'L0'])
tm.assert_frame_equal(result, expected)
def test_reset_index(self, float_frame):
stacked = float_frame.stack()[::2]
stacked = DataFrame({'foo': stacked, 'bar': stacked})
names = ['first', 'second']
stacked.index.names = names
deleveled = stacked.reset_index()
for i, (lev, level_codes) in enumerate(zip(stacked.index.levels,
stacked.index.codes)):
values = lev.take(level_codes)
name = names[i]
tm.assert_index_equal(values, Index(deleveled[name]))
stacked.index.names = [None, None]
deleveled2 = stacked.reset_index()
tm.assert_series_equal(deleveled['first'], deleveled2['level_0'],
check_names=False)
tm.assert_series_equal(deleveled['second'], deleveled2['level_1'],
check_names=False)
# default name assigned
rdf = float_frame.reset_index()
exp = Series(float_frame.index.values, name='index')
tm.assert_series_equal(rdf['index'], exp)
# default name assigned, corner case
df = float_frame.copy()
df['index'] = 'foo'
rdf = df.reset_index()
exp = Series(float_frame.index.values, name='level_0')
tm.assert_series_equal(rdf['level_0'], exp)
# but this is ok
float_frame.index.name = 'index'
deleveled = float_frame.reset_index()
tm.assert_series_equal(deleveled['index'], Series(float_frame.index))
tm.assert_index_equal(deleveled.index,
Index(np.arange(len(deleveled))))
# preserve column names
float_frame.columns.name = 'columns'
resetted = float_frame.reset_index()
assert resetted.columns.name == 'columns'
# only remove certain columns
df = float_frame.reset_index().set_index(['index', 'A', 'B'])
rs = df.reset_index(['A', 'B'])
# TODO should reset_index check_names ?
tm.assert_frame_equal(rs, float_frame, check_names=False)
rs = df.reset_index(['index', 'A', 'B'])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index(['index', 'A', 'B'])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index('A')
xp = float_frame.reset_index().set_index(['index', 'B'])
tm.assert_frame_equal(rs, xp, check_names=False)
# test resetting in place
df = float_frame.copy()
resetted = float_frame.reset_index()
df.reset_index(inplace=True)
tm.assert_frame_equal(df, resetted, check_names=False)
df = float_frame.reset_index().set_index(['index', 'A', 'B'])
rs = df.reset_index('A', drop=True)
xp = float_frame.copy()
del xp['A']
xp = xp.set_index(['B'], append=True)
tm.assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_name(self):
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
columns=['A', 'B', 'C', 'D'],
index=Index(range(2), name='x'))
assert df.reset_index().index.name is None
assert df.reset_index(drop=True).index.name is None
df.reset_index(inplace=True)
assert df.index.name is None
def test_reset_index_level(self):
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
columns=['A', 'B', 'C', 'D'])
for levels in ['A', 'B'], [0, 1]:
# With MultiIndex
result = df.set_index(['A', 'B']).reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index('B'))
result = df.set_index(['A', 'B']).reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index('B'))
result = df.set_index(['A', 'B']).reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(['A', 'B']).reset_index(level=levels,
drop=True)
tm.assert_frame_equal(result, df[['C', 'D']])
# With single-level Index (GH 16263)
result = df.set_index('A').reset_index(level=levels[0])
tm.assert_frame_equal(result, df)
result = df.set_index('A').reset_index(level=levels[:1])
tm.assert_frame_equal(result, df)
result = df.set_index(['A']).reset_index(level=levels[0],
drop=True)
tm.assert_frame_equal(result, df[['B', 'C', 'D']])
# Missing levels - for both MultiIndex and single-level Index:
for idx_lev in ['A', 'B'], ['A']:
with pytest.raises(KeyError, match='Level E '):
df.set_index(idx_lev).reset_index(level=['A', 'E'])
with pytest.raises(IndexError, match='Too many levels'):
df.set_index(idx_lev).reset_index(level=[0, 1, 2])
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series((9.81 * time ** 2) / 2,
index=Index(time, name='time'),
name='speed')
df = DataFrame(s1)
resetted = s1.reset_index()
assert resetted['time'].dtype == np.float64
resetted = df.reset_index()
assert resetted['time'].dtype == np.float64
def test_reset_index_multiindex_col(self):
vals = np.random.randn(3, 3).astype(object)
idx = ['x', 'y', 'z']
full = np.hstack(([[x] for x in idx], vals))
df = DataFrame(vals, Index(idx, name='a'),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index()
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_fill=None)
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_level=1, col_fill='blah')
xp = DataFrame(full, columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
df = DataFrame(vals,
MultiIndex.from_arrays([[0, 1, 2], ['x', 'y', 'z']],
names=['d', 'a']),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
        rs = df.reset_index('a')
xp = DataFrame(full, Index([0, 1, 2], name='d'),
columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill=None)
xp = DataFrame(full, Index(range(3), name='d'),
columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill='blah', col_level=1)
xp = DataFrame(full, Index(range(3), name='d'),
columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
def test_reset_index_multiindex_nan(self):
# GH6322, testing reset_index on MultiIndexes
# when we have a nan or all nan
df = DataFrame({'A': ['a', 'b', 'c'],
'B': [0, 1, np.nan],
'C': np.random.rand(3)})
rs = df.set_index(['A', 'B']).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame({'A': [np.nan, 'b', 'c'],
'B': [0, 1, 2],
'C': np.random.rand(3)})
rs = df.set_index(['A', 'B']).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame({'A': ['a', 'b', 'c'],
'B': [0, 1, 2],
'C': [np.nan, 1.1, 2.2]})
rs = df.set_index(['A', 'B']).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame({'A': ['a', 'b', 'c'],
'B': [np.nan, np.nan, np.nan],
'C': np.random.rand(3)})
rs = df.set_index(['A', 'B']).reset_index()
tm.assert_frame_equal(rs, df)
def test_reset_index_with_datetimeindex_cols(self):
# GH5818
#
df = DataFrame([[1, 2], [3, 4]],
columns=date_range('1/1/2013', '1/2/2013'),
index=['A', 'B'])
result = df.reset_index()
expected = DataFrame([['A', 1, 2], ['B', 3, 4]],
columns=['index', datetime(2013, 1, 1),
datetime(2013, 1, 2)])
tm.assert_frame_equal(result, expected)
def test_reset_index_range(self):
# GH 12071
df = DataFrame([[0, 0], [1, 1]], columns=['A', 'B'],
index=RangeIndex(stop=2))
result = df.reset_index()
assert isinstance(result.index, RangeIndex)
expected = DataFrame([[0, 0, 0], [1, 1, 1]],
columns=['index', 'A', 'B'],
index=RangeIndex(stop=2))
tm.assert_frame_equal(result, expected)
def test_set_index_names(self):
df = tm.makeDataFrame()
df.index.name = 'name'
assert df.set_index(df.index).index.names == ['name']
mi = MultiIndex.from_arrays(df[['A', 'B']].T.values, names=['A', 'B'])
mi2 = MultiIndex.from_arrays(df[['A', 'B', 'A', 'B']].T.values,
names=['A', 'B', 'C', 'D'])
df = df.set_index(['A', 'B'])
assert df.set_index(df.index).index.names == ['A', 'B']
# Check that set_index isn't converting a MultiIndex into an Index
assert isinstance(df.set_index(df.index).index, MultiIndex)
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
idx2 = df.index.rename(['C', 'D'])
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
assert isinstance(df.set_index([df.index, idx2]).index, MultiIndex)
# Check equality
tm.assert_index_equal(df.set_index([df.index, idx2]).index, mi2)
def test_rename_objects(self, float_string_frame):
renamed = float_string_frame.rename(columns=str.upper)
assert 'FOO' in renamed
assert 'foo' not in renamed
def test_rename_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=['X', 'Y'])
expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=['X', 'Y'])
result = df.rename(str.lower, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis='columns')
tm.assert_frame_equal(result, expected)
result = df.rename({"A": 'a', 'B': 'b'}, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename({"A": 'a', 'B': 'b'}, axis='columns')
tm.assert_frame_equal(result, expected)
# Index
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=['x', 'y'])
result = df.rename(str.lower, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis='index')
tm.assert_frame_equal(result, expected)
result = df.rename({'X': 'x', 'Y': 'y'}, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename({'X': 'x', 'Y': 'y'}, axis='index')
tm.assert_frame_equal(result, expected)
result = df.rename(mapper=str.lower, axis='index')
tm.assert_frame_equal(result, expected)
def test_rename_mapper_multi(self):
df = DataFrame({"A": ['a', 'b'], "B": ['c', 'd'],
'C': [1, 2]}).set_index(["A", "B"])
result = df.rename(str.upper)
expected = df.rename(index=str.upper)
tm.assert_frame_equal(result, expected)
def test_rename_positional_named(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"a": [1, 2], "b": [1, 2]}, index=['X', 'Y'])
result = df.rename(str.lower, columns=str.upper)
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_rename_axis_style_raises(self):
# see gh-12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["0", "1"])
# Named target and axis
over_spec_msg = ("Cannot specify both 'axis' and "
"any of 'index' or 'columns'")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis=1)
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis="columns")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(columns=str.lower, axis="columns")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis=0)
# Multiple targets and axis
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(str.lower, str.lower, axis="columns")
# Too many targets
over_spec_msg = "Cannot specify all of 'mapper', 'index', 'columns'."
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(str.lower, str.lower, str.lower)
# Duplicates
with pytest.raises(TypeError, match="multiple values"):
df.rename(id, mapper=id)
def test_reindex_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
res1 = df.reindex(['b', 'a'])
res2 = df.reindex(index=['b', 'a'])
res3 = df.reindex(labels=['b', 'a'])
res4 = df.reindex(labels=['b', 'a'], axis=0)
res5 = df.reindex(['b', 'a'], axis=0)
for res in [res2, res3, res4, res5]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(columns=['e', 'd'])
res2 = df.reindex(['e', 'd'], axis=1)
res3 = df.reindex(labels=['e', 'd'], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(index=['b', 'a'], columns=['e', 'd'])
res2 = df.reindex(columns=['e', 'd'], index=['b', 'a'])
res3 = df.reindex(labels=['b', 'a'], axis=0).reindex(labels=['e', 'd'],
axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
def test_rename_positional(self):
df = DataFrame(columns=['A', 'B'])
with tm.assert_produces_warning(FutureWarning) as rec:
result = df.rename(None, str.lower)
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
assert len(rec) == 1
message = str(rec[0].message)
assert 'rename' in message
assert 'Use named arguments' in message
def test_assign_columns(self, float_frame):
float_frame['hi'] = 'there'
df = float_frame.copy()
df.columns = ['foo', 'bar', 'baz', 'quux', 'foo2']
tm.assert_series_equal(float_frame['C'], df['baz'], check_names=False)
tm.assert_series_equal(float_frame['hi'], df['foo2'],
check_names=False)
def test_set_index_preserve_categorical_dtype(self):
# GH13743, GH13854
df = DataFrame({'A': [1, 2, 1, 1, 2],
'B': [10, 16, 22, 28, 34],
'C1': Categorical(list("abaab"),
categories=list("bac"),
ordered=False),
'C2': Categorical(list("abaab"),
categories=list("bac"),
ordered=True)})
for cols in ['C1', 'C2', ['A', 'C1'], ['A', 'C2'], ['C1', 'C2']]:
result = df.set_index(cols).reset_index()
result = result.reindex(columns=df.columns)
tm.assert_frame_equal(result, df)
def test_ambiguous_warns(self):
df = DataFrame({"A": [1, 2]})
with tm.assert_produces_warning(FutureWarning):
df.rename(id, id)
with tm.assert_produces_warning(FutureWarning):
df.rename({0: 10}, {"A": "B"})
def test_rename_signature(self):
sig = inspect.signature(DataFrame.rename)
parameters = set(sig.parameters)
assert parameters == {"self", "mapper", "index", "columns", "axis",
"inplace", "copy", "level", "errors"}
def test_reindex_signature(self):
sig = inspect.signature(DataFrame.reindex)
parameters = set(sig.parameters)
assert parameters == {"self", "labels", "index", "columns", "axis",
"limit", "copy", "level", "method",
"fill_value", "tolerance"}
def test_droplevel(self):
# GH20342
df = DataFrame([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]
])
df = df.set_index([0, 1]).rename_axis(['a', 'b'])
df.columns = MultiIndex.from_tuples([('c', 'e'), ('d', 'f')],
names=['level_1', 'level_2'])
# test that dropping of a level in index works
expected = df.reset_index('a', drop=True)
result = df.droplevel('a', axis='index')
tm.assert_frame_equal(result, expected)
# test that dropping of a level in columns works
expected = df.copy()
expected.columns = Index(['c', 'd'], name='level_1')
result = df.droplevel('level_2', axis='columns')
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestIntervalIndex:
def test_setitem(self):
df = DataFrame({'A': range(10)})
s = cut(df.A, 5)
assert isinstance(s.cat.categories, IntervalIndex)
# B & D end up as Categoricals
        # the remainder are converted to in-line objects
        # containing an IntervalIndex.values
df['B'] = s
df['C'] = np.array(s)
df['D'] = s.values
df['E'] = np.array(s.values)
assert is_categorical_dtype(df['B'])
assert is_interval_dtype(df['B'].cat.categories)
assert is_categorical_dtype(df['D'])
assert is_interval_dtype(df['D'].cat.categories)
assert is_object_dtype(df['C'])
assert is_object_dtype(df['E'])
# they compare equal as Index
# when converted to numpy objects
c = lambda x: Index(np.array(x))
tm.assert_index_equal(c(df.B), c(df.B), check_names=False)
tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
        tm.assert_index_equal(c(df.B), c(df.E), check_names=False)
# B & D are the same Series
tm.assert_series_equal(df['B'], df['B'], check_names=False)
tm.assert_series_equal(df['B'], df['D'], check_names=False)
# C & E are the same Series
tm.assert_series_equal(df['C'], df['C'], check_names=False)
tm.assert_series_equal(df['C'], df['E'], check_names=False)
def test_set_reset_index(self):
df = DataFrame({'A': range(10)})
s = cut(df.A, 5)
df['B'] = s
df = df.set_index('B')
df = df.reset_index()
def test_set_axis_inplace(self):
# GH14636
df = DataFrame({'A': [1.1, 2.2, 3.3],
'B': [5.0, 6.1, 7.2],
'C': [4.4, 5.5, 6.6]},
index=[2010, 2011, 2012])
expected = {0: df.copy(),
1: df.copy()}
expected[0].index = list('abc')
expected[1].columns = list('abc')
expected['index'] = expected[0]
expected['columns'] = expected[1]
for axis in expected:
# inplace=True
# The FutureWarning comes from the fact that we would like to have
# inplace default to False some day
for inplace, warn in (None, FutureWarning), (True, None):
kwargs = {'inplace': inplace}
result = df.copy()
with tm.assert_produces_warning(warn):
result.set_axis(list('abc'), axis=axis, **kwargs)
tm.assert_frame_equal(result, expected[axis])
# inplace=False
result = df.set_axis(list('abc'), axis=axis, inplace=False)
tm.assert_frame_equal(expected[axis], result)
# omitting the "axis" parameter
with tm.assert_produces_warning(None):
result = df.set_axis(list('abc'), inplace=False)
tm.assert_frame_equal(result, expected[0])
# wrong values for the "axis" parameter
for axis in 3, 'foo':
with pytest.raises(ValueError, match='No axis named'):
df.set_axis(list('abc'), axis=axis, inplace=False)
def test_set_axis_prior_to_deprecation_signature(self):
df = DataFrame({'A': [1.1, 2.2, 3.3],
'B': [5.0, 6.1, 7.2],
'C': [4.4, 5.5, 6.6]},
index=[2010, 2011, 2012])
expected = {0: df.copy(),
1: df.copy()}
expected[0].index = list('abc')
expected[1].columns = list('abc')
expected['index'] = expected[0]
expected['columns'] = expected[1]
# old signature
for axis in expected:
with tm.assert_produces_warning(FutureWarning):
result = df.set_axis(axis, list('abc'), inplace=False)
tm.assert_frame_equal(result, expected[axis])
|
bsd-3-clause
|
kdmurray91/scikit-bio
|
skbio/stats/ordination/_redundancy_analysis.py
|
2
|
9325
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import pandas as pd
from scipy.linalg import svd, lstsq
from skbio.util._decorator import experimental
from ._ordination_results import OrdinationResults
from ._utils import corr, svd_rank, scale
@experimental(as_of="0.4.0")
def rda(y, x, scale_Y=False, scaling=1):
r"""Compute redundancy analysis, a type of canonical analysis.
It is related to PCA and multiple regression because the explained
variables `y` are fitted to the explanatory variables `x` and PCA
is then performed on the fitted values. A similar process is
performed on the residuals.
RDA should be chosen if the studied gradient is small, and CCA
when it's large, so that the contingency table is sparse.
Parameters
----------
y : pd.DataFrame
:math:`n \times p` response matrix, where :math:`n` is the number
of samples and :math:`p` is the number of features. Its columns
        need to be dimensionally homogeneous (or you can set `scale_Y=True`).
This matrix is also referred to as the community matrix that
commonly stores information about species abundances
x : pd.DataFrame
:math:`n \times m, n \geq m` matrix of explanatory
variables, where :math:`n` is the number of samples and
:math:`m` is the number of metadata variables. Its columns
need not be standardized, but doing so turns regression
coefficients into standard regression coefficients.
scale_Y : bool, optional
Controls whether the response matrix columns are scaled to
have unit standard deviation. Defaults to `False`.
scaling : int
Scaling type 1 produces a distance biplot. It focuses on
the ordination of rows (samples) because their transformed
distances approximate their original euclidean
distances. Especially interesting when most explanatory
variables are binary.
Scaling type 2 produces a correlation biplot. It focuses
on the relationships among explained variables (`y`). It
is interpreted like scaling type 1, but taking into
account that distances between objects don't approximate
their euclidean distances.
See more details about distance and correlation biplots in
[1]_, \S 9.1.4.
Returns
-------
OrdinationResults
Object that stores the computed eigenvalues, the
proportion explained by each of them (per unit),
transformed coordinates for feature and samples, biplot
scores, sample constraints, etc.
See Also
--------
ca
cca
OrdinationResults
Notes
-----
The algorithm is based on [1]_, \S 11.1, and is expected to
give the same results as ``rda(y, x)`` in R's package vegan.
The eigenvalues reported in vegan are re-normalized to
    :math:`\sqrt{\frac{s}{n-1}}`, where `n` is the number of samples
    and `s` is the original eigenvalue. Here we will only return
the original eigenvalues, as recommended in [1]_.
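    For example, a vegan-style value could be recovered from an
    eigenvalue `s` returned here as ``np.sqrt(s / (n - 1))`` (an
    illustrative sketch of the relation above, not an output of this
    function).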
References
----------
.. [1] Legendre P. and Legendre L. 1998. Numerical
Ecology. Elsevier, Amsterdam.
"""
Y = y.as_matrix()
X = x.as_matrix()
n, p = y.shape
n_, m = x.shape
if n != n_:
raise ValueError(
"Both data matrices must have the same number of rows.")
if n < m:
# Mmm actually vegan is able to do this case, too
raise ValueError(
"Explanatory variables cannot have less rows than columns.")
sample_ids = y.index
feature_ids = y.columns
# Centre response variables (they must be dimensionally
# homogeneous)
Y = scale(Y, with_std=scale_Y)
# Centre explanatory variables
X = scale(X, with_std=False)
# Distribution of variables should be examined and transformed
# if necessary (see paragraph 4 in p. 580 L&L 1998)
# Compute Y_hat (fitted values by multivariate linear
# regression, that is, linear least squares). Formula 11.6 in
# L&L 1998 involves solving the normal equations, but that fails
# when cond(X) ~ eps**(-0.5). A more expensive but much more
# stable solution (fails when cond(X) ~ eps**-1) is computed
# using the QR decomposition of X = QR:
# (11.6) Y_hat = X [X' X]^{-1} X' Y
# = QR [R'Q' QR]^{-1} R'Q' Y
# = QR [R' R]^{-1} R'Q' Y
# = QR R^{-1} R'^{-1} R' Q' Y
# = Q Q' Y
# and B (matrix of regression coefficients)
# (11.4) B = [X' X]^{-1} X' Y
# = R^{-1} R'^{-1} R' Q' Y
# = R^{-1} Q'
# Q, R = np.linalg.qr(X)
# Y_hat = Q.dot(Q.T).dot(Y)
# B = scipy.linalg.solve_triangular(R, Q.T.dot(Y))
# This works provided X has full rank. When not, you can still
# fix it using R's pseudoinverse or partitioning R. To avoid any
# issues, like the numerical instability when trying to
# reproduce an example in L&L where X was rank-deficient, we'll
# just use `np.linalg.lstsq`, which uses the SVD decomposition
# under the hood and so it's also more expensive.
B, _, rank_X, _ = lstsq(X, Y)
Y_hat = X.dot(B)
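    # Sanity-check sketch of the equivalence above (assumes X has full
    # column rank; kept as a comment so it is never executed by rda):
    #   Q, _ = np.linalg.qr(X)
    #   assert np.allclose(Y_hat, Q.dot(Q.T).dot(Y))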
# Now let's perform PCA on the fitted values from the multiple
# regression
u, s, vt = svd(Y_hat, full_matrices=False)
# vt are the right eigenvectors, which is what we need to
# perform PCA. That is, we're changing points in Y_hat from the
# canonical basis to the orthonormal basis given by the right
# eigenvectors of Y_hat (or equivalently, the eigenvectors of
# the covariance matrix Y_hat.T.dot(Y_hat))
# See 3) in p. 583 in L&L 1998
rank = svd_rank(Y_hat.shape, s)
# Theoretically, there're at most min(p, m, n - 1) non-zero eigenvalues
U = vt[:rank].T # U as in Fig. 11.2
# Ordination in the space of response variables. Its columns are
# sample scores. (Eq. 11.12)
F = Y.dot(U)
# Ordination in the space of explanatory variables. Its columns
# are fitted sample scores. (Eq. 11.13)
Z = Y_hat.dot(U)
# Canonical coefficients (formula 11.14)
# C = B.dot(U) # Not used
Y_res = Y - Y_hat
# PCA on the residuals
u_res, s_res, vt_res = svd(Y_res, full_matrices=False)
# See 9) in p. 587 in L&L 1998
rank_res = svd_rank(Y_res.shape, s_res)
    # Theoretically, there're at most min(p, n - 1) non-zero eigenvalues as well
U_res = vt_res[:rank_res].T
F_res = Y_res.dot(U_res) # Ordination in the space of residuals
eigenvalues = np.r_[s[:rank], s_res[:rank_res]]
# Compute scores
if scaling not in {1, 2}:
raise NotImplementedError("Only scalings 1, 2 available for RDA.")
# According to the vegan-FAQ.pdf, the scaling factor for scores
# is (notice that L&L 1998 says in p. 586 that such scaling
# doesn't affect the interpretation of a biplot):
pc_ids = ['RDA%d' % (i+1) for i in range(len(eigenvalues))]
eigvals = pd.Series(eigenvalues, index=pc_ids)
const = np.sum(eigenvalues**2)**0.25
if scaling == 1:
scaling_factor = const
elif scaling == 2:
scaling_factor = eigenvalues / const
feature_scores = np.hstack((U, U_res)) * scaling_factor
sample_scores = np.hstack((F, F_res)) / scaling_factor
feature_scores = pd.DataFrame(feature_scores,
index=feature_ids,
columns=pc_ids)
sample_scores = pd.DataFrame(sample_scores,
index=sample_ids,
columns=pc_ids)
# TODO not yet used/displayed
sample_constraints = pd.DataFrame(np.hstack((Z, F_res)) / scaling_factor,
index=sample_ids,
columns=pc_ids)
# Vegan seems to compute them as corr(X[:, :rank_X],
# u) but I don't think that's a good idea. In fact, if
# you take the example shown in Figure 11.3 in L&L 1998 you
# can see that there's an arrow for each of the 4
# environmental variables (depth, coral, sand, other) even if
# other = not(coral or sand)
biplot_scores = corr(X, u)
biplot_scores = pd.DataFrame(biplot_scores,
index=x.columns,
columns=pc_ids[:biplot_scores.shape[1]])
# The "Correlations of environmental variables with sample
# scores" from table 11.4 are quite similar to vegan's biplot
# scores, but they're computed like this:
# corr(X, F))
p_explained = pd.Series(eigenvalues / eigenvalues.sum(), index=pc_ids)
return OrdinationResults('RDA', 'Redundancy Analysis',
eigvals=eigvals,
proportion_explained=p_explained,
features=feature_scores,
samples=sample_scores,
biplot_scores=biplot_scores,
sample_constraints=sample_constraints)
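

if __name__ == "__main__":
    # Minimal usage sketch (illustration only; not part of the original
    # module, and it assumes a pandas version where DataFrame.as_matrix,
    # used above, is still available). Y_toy and X_toy are made-up toy
    # data; both scalings are run just to contrast the distance
    # (scaling=1) and correlation (scaling=2) biplots described in the
    # docstring.
    _rng = np.random.RandomState(0)
    Y_toy = pd.DataFrame(_rng.poisson(5, size=(10, 4)),
                         columns=['sp%d' % i for i in range(4)])
    X_toy = pd.DataFrame(_rng.normal(size=(10, 2)),
                         columns=['depth', 'temp'])
    for scaling_type in (1, 2):
        res = rda(Y_toy, X_toy, scale_Y=True, scaling=scaling_type)
        print(scaling_type, res.proportion_explained.round(3).tolist())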
|
bsd-3-clause
|
vshtanko/scikit-learn
|
sklearn/neighbors/tests/test_neighbors.py
|
103
|
41083
|
from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0])))
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
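# --- Illustrative sketch (not part of the original test suite) ---
# Reproduces, by hand, the 'distance'-weighted probabilities checked above for
# the query point [2, 2, 2]: the two nearest training points under the
# cityblock metric are weighted by 1 / distance and the votes are normalised
# per class. Expected result: {1: 0.0, 4: 0.4, 5: 0.6}.
def _example_distance_weighted_vote():
    X = np.array([[0, 2, 0], [0, 2, 1], [2, 0, 0],
                  [2, 2, 0], [0, 0, 2], [0, 0, 1]])
    y = np.array([4, 4, 5, 5, 1, 1])
    query = np.array([2, 2, 2])
    d = np.abs(X - query).sum(axis=1)      # cityblock distances to the query
    nearest = np.argsort(d)[:2]            # indices of the two nearest points
    w = 1. / d[nearest]                    # inverse-distance weights
    return dict((label, w[y[nearest] == label].sum() / w.sum())
                for label in np.unique(y))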
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
    # In this case it should raise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
        # we don't test weights=_weight_func here since the user is expected
        # to handle zero distances themselves in that function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([0.0], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
    # Test radius-neighbors classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
    # Test radius neighbors in multi-output regression with various weights
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
    # Test k-nearest-neighbors regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
    # Fit k-NN classifiers and a regressor on the full dataset and check
    # that they reproduce the training targets to high accuracy.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[])
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError,
nbrs.kneighbors_graph,
X, mode='blah')
assert_raises(ValueError,
nbrs.radius_neighbors_graph,
X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
metric='wminkowski', w=np.ones(10))
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array = np.sort(dist_array)
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph,
nbrs1.radius_neighbors_graph(X).toarray())
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
# Test kneighbors et.al when query is not training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
# Test kneighbors et.al when query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
        for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
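# --- Illustrative sketch (not part of the original test suite) ---
# A minimal usage example of the query APIs exercised throughout this file,
# kept as a private helper so test collectors ignore it.
def _example_neighbors_queries():
    X = np.array([[0.0], [1.0], [2.5]])
    nn = neighbors.NearestNeighbors(n_neighbors=2, radius=1.2).fit(X)
    # k-nearest query: arrays of shape (n_queries, n_neighbors).
    dist, ind = nn.kneighbors([[0.9]])
    # radius query: object arrays, since each query point may have a
    # different number of neighbours inside the radius.
    rdist, rind = nn.radius_neighbors([[0.9]])
    return dist, ind, rdist, rind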
|
bsd-3-clause
|
anguswilliams91/OnTheRun
|
code/fits.py
|
1
|
12759
|
from __future__ import division, print_function
import numpy as np, models as m, sql_utils as sql, pandas as pd,\
multiprocessing as mp, gus_utils as gu
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.integrate import fixed_quad
from functools import partial
from scipy.special import gamma
def construct_interpolator(data,tracer):
"""
Construct an interpolator for p(r) of the data
Arguments
---------
data: DataFrame
pandas dataframe with relevant info for the tracer
tracer: string
name of tracer
Returns
-------
endpoints: list
the centres of the extremal bins
spline: InterpolatedUnivariateSpline
spline object for p(r)
"""
r = m.compute_galactocentric_radii(data,tracer,append_dataframe=False)
if tracer=="kgiant" or tracer=="bhb":
r = r[r<50.]
elif tracer=="main_sequence":
r = r[r<20.]
pdf,bins = np.histogram(r,10,normed=True)
r_nodes = np.array([.5*(bins[i]+bins[i+1]) for i in np.arange(10)])
return ([np.min(r_nodes), np.max(r_nodes)],InterpolatedUnivariateSpline(r_nodes,pdf))
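# --- Illustrative sketch (synthetic radii; not used by the pipeline) ---
# The same p(r)-spline construction as construct_interpolator above, but run
# on toy galactocentric radii so it needs neither the SDSS catalogues nor the
# `models` helpers. The bin count and maximum radius are arbitrary choices.
def _example_radius_spline(n_stars=1000, rmax=50., nbins=10, seed=0):
    rand = np.random.RandomState(seed)
    r = rmax * rand.rand(n_stars) ** (1. / 3.)  # roughly uniform in volume
    pdf, bins = np.histogram(r, nbins, normed=True)
    r_nodes = .5 * (bins[:-1] + bins[1:])
    return ([r_nodes.min(), r_nodes.max()],
            InterpolatedUnivariateSpline(r_nodes, pdf))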
def vLOS_probability(v,vmin,k,spline,limits,params,model):
"""Calculate the probability density at a line of sight velocity given a model
with a particular set of parameters, and a selection function p(r).
Arguments
---------
v: float
line of sight velocity at which to evaluate the probability
vmin: float
the minimum line of sight velocity in the sample
k: float
the power law index of the speed distribution
spline: InterpolatedUnivariateSpline
a spline object for p(r)
limits: list
the upper and lower limits of p(r)
params: array_like
model parameters
model: string
the name of the model
Returns
-------
pdf: float
the probability density at v
"""
if v<vmin: return 0.
def numerator_integrand(r):
out = np.zeros_like(r)
vesc = m.vesc_model(r,0.,0.,params,model)
out[vesc<=v] = 0.
out[vesc>v] = (m.vesc_model(r[vesc>v],0.,0.,params,model) - v)**(k+1.) * spline(r[vesc>v])
return out
numerator = fixed_quad(numerator_integrand,limits[0],limits[1],n=12)[0]
def denominator_integrand(r):
return spline(r)*(m.vesc_model(r,0.,0.,params,model) - vmin)**(k+2.) / (k+2.)
denominator = fixed_quad(denominator_integrand,limits[0],limits[1],n=12)[0]
return numerator/denominator
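# --- Illustrative sketch (toy potential; not one of the models in the paper) ---
# Shows the same fixed-order Gauss-Legendre evaluation of the numerator and
# denominator integrals as vLOS_probability, with a hypothetical power-law
# escape-velocity curve and a flat p(r) so that it is fully self-contained.
def _example_vlos_probability(v, vmin=200., k=3.5, rlim=(5., 50.)):
    def toy_vesc(r):
        return 600. * (r / 8.5) ** -0.1               # assumed curve, km/s
    def toy_pr(r):
        return np.ones_like(r) / (rlim[1] - rlim[0])  # flat selection function
    if v < vmin:
        return 0.
    def numerator_integrand(r):
        vesc = toy_vesc(r)
        out = np.zeros_like(r)
        mask = vesc > v
        out[mask] = (vesc[mask] - v) ** (k + 1.) * toy_pr(r[mask])
        return out
    def denominator_integrand(r):
        return toy_pr(r) * (toy_vesc(r) - vmin) ** (k + 2.) / (k + 2.)
    numerator = fixed_quad(numerator_integrand, rlim[0], rlim[1], n=12)[0]
    denominator = fixed_quad(denominator_integrand, rlim[0], rlim[1], n=12)[0]
    return numerator / denominator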
def draw_samples(N,vmin,k,spline,limits,params,model):
"""
Given a model, draw a sample of size N from p(vLOS).
Arguments
---------
N: int
the number of points to draw
vmin: float
the minimum speed considered
k: float
the power law index of the speed distribution
spline: InterpolatedUnivariateSpline
spline of p(r) for this tracer
limits: list
the upper and lower limits of the spline
params: array_like
model parameters
model: string
name of model
Returns
-------
v: array_like
list of velocities sampled from p(vLOS)
"""
v = np.linspace(vmin,600.,100)
pdf = np.array([vLOS_probability(vi,vmin,k,spline,limits,params,model) for vi in v])
v_spline = InterpolatedUnivariateSpline(v,pdf)
cdf = np.array([fixed_quad(v_spline,vmin,vi)[0] for vi in v])
try:
idx = np.where(np.diff(cdf)<0.)[0][0]+1
    except IndexError:
idx = None
v,cdf = v[:idx],cdf[:idx]
inv_cdf = InterpolatedUnivariateSpline(cdf,v)
u = np.random.uniform(size=N)
return inv_cdf(u)
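# --- Illustrative sketch (analytic pdf; for exposition only) ---
# The same spline-CDF inversion used by draw_samples, demonstrated on a simple
# truncated power law p(v) ~ (vesc - v)^k on [vmin, vesc], whose CDF can be
# checked analytically. The values of vesc and k here are arbitrary.
def _example_inverse_cdf_sampling(N=1000, vmin=200., vesc=550., k=3.5, seed=0):
    v = np.linspace(vmin, vesc, 200)
    pdf = (vesc - v) ** k
    pdf_spline = InterpolatedUnivariateSpline(v, pdf)
    cdf = np.array([fixed_quad(pdf_spline, vmin, vi)[0] for vi in v])
    cdf /= cdf[-1]                              # normalise to [0, 1]
    inv_cdf = InterpolatedUnivariateSpline(cdf, v)
    u = np.random.RandomState(seed).uniform(size=N)
    return inv_cdf(u)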
def posterior_predictive_check(chain,tracer,model,vmin,nbins=20,thin_by=1,burnin=200):
"""
For every set of parameters in an MCMC chain, generate a mock data set of the same
size as the data.
Arguments
---------
chain: array_like [nsamples,ndim]
mcmc chain
tracer: string
type of tracer
model: string
the name of the model
vmin: float
the minimum speed considered
nbins: int (=20)
the number of bins in vLOS to use
thin_by: int(=1)
thin the chains by this factor
burnin: int(=200)
number of steps per walker to burn in
Returns
-------
bin_centres: array_like
centres of bins in vLOS
counts: array_like
the number counts of the data in each of the above bins
model_counts: array_like[nsamples,nstars]
the counts generated in each of the above bins in each mock sample
"""
n = m.get_numparams(model)
c = gu.reshape_chain(chain)[:,burnin::thin_by,:]
c = np.reshape(c, (c.shape[0]*c.shape[1],c.shape[2]))
samples = c[:,-n:]
if tracer == "main_sequence":
k = c[:,2]
data = pd.read_csv("/data/aamw3/SDSS/main_sequence.csv")
data = data[data.vgsr!=np.max(data.vgsr)].reset_index(drop=True) #remove the one outlier
elif tracer == "kgiant":
k = c[:,1]
data = pd.read_csv("/data/aamw3/SDSS/kgiant.csv")
else:
k = c[:,0]
data = pd.read_csv("/data/aamw3/SDSS/bhb.csv")
lims,spline = construct_interpolator(data,tracer)
data = data[np.abs(data.vgsr)>vmin].reset_index(drop=True)
N = len(data)
counts,bin_edges = np.histogram(np.abs(data.vgsr.values),nbins)
bin_centres = np.array([.5*(bin_edges[i] + bin_edges[i+1]) for i in np.arange(nbins)])
model_counts = np.zeros((len(k), nbins))
for i,theta in enumerate(samples):
v = draw_samples(N,vmin,k[i],spline,lims,theta,model)
model_counts[i,:],_ = np.histogram(v,bin_edges)
return bin_centres,counts,model_counts
def ppc_alltracers(fname,chain,model,vmin,nbins=[20,20,10],thin_by=1,burnin=200):
"""
generate mock samples for all of our tracer groups. Save all of the information
to file.
Arguments
---------
fname: string
name of file to write dictionaries to
chain: array_like [nsamples,ndim]
mcmc chain
model: string
the name of the model
vmin: float
the minimum speed considered
nbins: list(=[20,20,10])
the number of bins to use for each of the three tracers
thin_by: int(=1)
thin the chains by this factor
burnin: int(=200)
number of steps per walker to burn in
"""
tracers = ["main_sequence","kgiant","bhb"]
for i,tracer in enumerate(tracers):
bin_centres,data_counts,model_counts = posterior_predictive_check(chain,tracer,\
model,vmin,nbins=nbins[i],thin_by=thin_by,burnin=burnin)
summary = {'bin_centres': bin_centres, 'data_counts': data_counts, 'model_counts': model_counts}
np.save(fname+"_"+tracer,summary)
return None
def posterior_predictive(v,vmin,k_samples,spline,limits,param_samples,model):
"""
Compute the posterior predictive distribution at v given samples from the posterior
from an MCMC.
Arguments
---------
v: float
the line-of-sight velocity at which to compute the posterior predictive distribution
vmin: float
cut off speed
k_samples: array_like
MCMC samples of the slope of the speed distribution
spline: InterpolatedUnivariateSpline
a spline object for p(r)
limits: list
[rmin,rmax] for the spline
param_samples: array_like [n_params, n_samples]
samples of the potential parameters
model: string
name of model
"""
return np.mean(np.array([ vLOS_probability(v,vmin,k_samples[i],spline,limits,param_samples[i],model) \
for i in np.arange(len(k_samples))]))
def posterior_predictive_grid(v_grid,vmin,chain,model,tracer,burnin=200,pool_size=8):
"""
Compute the posterior predictive distribution given an MCMC chain and a model. Parallelise
over a given number of threads to speed up computation.
Arguments
---------
v_grid: array_like
an array of speeds at which to evaluate the posterior predictive distribution
vmin: float
the minimum speed considered
chain: array_like [nsamples,ndim]
an MCMC chain of model parameters
model: string
the name of the model
tracer: string
the type of tracer
burnin: int (=200)
the number of steps per walker to disregard as burn-in
pool_size: int (=8)
the size of the multiprocessing pool over which to distribute computation
Returns
-------
ppd: array_like
array of the same shape as v_grid, containing the posterior predictive probabilities
at each speed in v_grid
"""
    #reshape the chain according to which model we're looking at
n = m.get_numparams(model)
c = gu.reshape_chain(chain)[:,burnin:,:]
c = np.reshape(c, (c.shape[0]*c.shape[1],c.shape[2]))
samples = c[:,-n:]
if tracer == "main_sequence":
k = c[:,2]
data = pd.read_csv("/data/aamw3/SDSS/main_sequence.csv")
lims,spline = construct_interpolator(data,"main_sequence")
elif tracer == "kgiant":
k = c[:,1]
data = pd.read_csv("/data/aamw3/SDSS/kgiant.csv")
lims,spline = construct_interpolator(data,"kgiant")
elif tracer == "bhb":
k = c[:,0]
data = pd.read_csv("/data/aamw3/SDSS/bhb.csv")
lims,spline = construct_interpolator(data,"bhb")
parfun = partial(posterior_predictive,vmin=vmin,k_samples=k,spline=spline,limits=lims\
,param_samples=samples,model=model)
pool = mp.Pool(pool_size)
output = pool.map(parfun,v_grid)
pool.close()
return output
def outlier_probabilities(params, data, vmin, model):
"""
Likelihood function template for given model.
Arguments
---------
params: array_like
the model parameters
data: list
the output of sample_distances_multiple_tracers
vmin: float
the minimum radial velocity considered
Returns
-------
logL: array_like
the sum of the log-likelihoods for this set of parameters.
"""
kbhb,kkgiant,kms,f = params[:4]
pot_params = params[4:]
outlier_probabilities = [None,None,None]
k = [kbhb,kkgiant,kms]
outlier_normalisation = ( .5*m.erfc( vmin / (np.sqrt(2.)*1000.) ) )**-1.
for i,tracer in enumerate(data):
l,b,v,s = tracer
x,y,z = gu.galactic2cartesian(s,b,l)
vesc = m.vesc_model(x,y,z,pot_params,model)
out = np.zeros_like(v)
with m.warnings.catch_warnings():
#deliberately getting NaNs here so stop python from telling us about it
m.warnings.simplefilter("ignore",category=RuntimeWarning)
out = (1.-f)*(k[i]+2)*(vesc - np.abs(v))**(k[i]+1.) / (vesc - vmin)**(k[i]+2.) + \
f*outlier_normalisation*m.Gaussian(np.abs(v),0.,1000.)
out[np.isnan(out)] = f*outlier_normalisation*m.Gaussian(np.abs(v[np.isnan(out)]),0.,1000.)
outlier = f*outlier_normalisation*m.Gaussian(np.abs(v),0.,1000.)
outlier_probabilities[i] = np.mean(outlier,axis=1) / np.mean(out, axis=1)
return outlier_probabilities
def check_outliers(chain,vmin,model,burnin=200):
"""
Compute the probabilities that stars are outliers using our MCMC chains. We are
being lazy and not marginalising over the posterior because this is a quick check.
"""
    res = gu.ChainResults(chain,burnin=burnin)[:,0]
n = m.get_numparams(model)
data = m.sample_distances_multiple_tracers(n_samples=200,vmin=vmin)
return outlier_probabilities(res,data,vmin,model)
def gaia_crossmatch():
"""
Cross-match our MS targets to TGAS and check that they have small tangential motions
"""
ms = pd.read_csv("/data/aamw3/SDSS/main_sequence.csv")
query_str = "select ss.pmra_new,ss.pmdec_new from mytable as t\
left join lateral (select g.pmra_new,g.pmdec_new \
from gaia_dr1_aux.gaia_source_sdssdr9_xm_new as g \
where g.objid=t.objid order by g.dist \
asc limit 1) as ss on true"
pmra,pmdec = sql.local_join(query_str,'mytable',(ms.objid.values,),('objid',))
ms.loc[:,'pmra'] = pd.Series(pmra,index=ms.index)
ms.loc[:,'pmdec'] = pd.Series(pmdec,index=ms.index)
return ms
def main():
fname = "/data/aamw3/SDSS/model_comparison"
chain = np.genfromtxt("/data/aamw3/mcmc/escape_chains/spherical_powerlaw.dat")
ppc_alltracers(fname,chain,"spherical_powerlaw",200.,nbins=[20,20,10],thin_by=1,burnin=200)
if __name__ == "__main__":
main()
|
mit
|
ch3ll0v3k/scikit-learn
|
examples/ensemble/plot_forest_iris.py
|
335
|
6271
|
"""
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10-fold cross-validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
estimators are built sequentially and so cannot make use of multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
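# --- Optional side check (a sketch, not part of the plotted example) ---
# Approximately reproduces the 10-fold cross-validation comparison quoted in
# the module docstring, using all 4 iris features and 30 estimators. Note that
# this assumes an older scikit-learn release where cross_val_score lives in
# sklearn.cross_validation; on newer ones it is in sklearn.model_selection.
def _compare_models_with_cv():
    from sklearn.cross_validation import cross_val_score
    data = load_iris()
    candidates = [DecisionTreeClassifier(max_depth=None),
                  RandomForestClassifier(n_estimators=30),
                  ExtraTreesClassifier(n_estimators=30),
                  AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
                                     n_estimators=30)]
    for est in candidates:
        scores = cross_val_score(est, data.data, data.target, cv=10)
        print("%s: mean CV accuracy = %.3f" % (type(est).__name__,
                                               scores.mean()))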
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
        # Train. NOTE: the clone is immediately overwritten below, so the
        # original ``model`` object is the one that gets fitted; the plotting
        # code further down relies on ``model`` having fitted attributes.
        clf = clone(model)
        clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline.
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
|
bsd-3-clause
|
harshaneelhg/scikit-learn
|
examples/svm/plot_svm_kernels.py
|
329
|
1971
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
|
bsd-3-clause
|
DSLituiev/scikit-learn
|
examples/calibration/plot_compare_calibration.py
|
82
|
5012
|
"""
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
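# --- Optional follow-up (a sketch, not part of the original figure) ---
# One way to act on the observations above: wrap the SVC in
# CalibratedClassifierCV so its decision values are mapped to calibrated
# probabilities. The choice of method='sigmoid' (Platt scaling) and cv=3 is
# arbitrary here.
def _calibrate_svc_example():
    from sklearn.calibration import CalibratedClassifierCV
    calibrated_svc = CalibratedClassifierCV(LinearSVC(C=1.0),
                                            method='sigmoid', cv=3)
    calibrated_svc.fit(X_train, y_train)
    prob_pos = calibrated_svc.predict_proba(X_test)[:, 1]
    return calibration_curve(y_test, prob_pos, n_bins=10)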
|
bsd-3-clause
|
zorojean/scikit-learn
|
examples/svm/plot_svm_regression.py
|
249
|
1451
|
"""
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
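# --- Optional follow-up (a sketch, not part of the original example) ---
# The example above fits and predicts on the same points; a quick held-out
# check of the RBF model could look like this (the 30/10 split is arbitrary).
def _held_out_check(seed=0):
    rand = np.random.RandomState(seed)
    idx = rand.permutation(len(X))
    train, test = idx[:30], idx[30:]
    model = SVR(kernel='rbf', C=1e3, gamma=0.1).fit(X[train], y[train])
    residuals = model.predict(X[test]) - y[test]
    print("held-out RMSE: %.3f" % np.sqrt(np.mean(residuals ** 2)))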
|
bsd-3-clause
|
estnltk/textclassifier
|
textclassifier/classify.py
|
1
|
2265
|
# -*- coding: utf-8 -*-
"""Command line program for classification.
"""
from __future__ import unicode_literals, print_function, absolute_import
from .utils import read_dataset, write_dataset
from .utils import check_filename, load_classifier
import argparse
import sys
import pandas as pd
import logging
import codecs
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('classify')
parser = argparse.ArgumentParser(prog='textclassifier.classify')
parser.add_argument(
'indata',
help=('Path for the input dataset that will be classified. It is possible to load .csv and .xlsx files.'))
parser.add_argument(
'outdata',
    help='Path where the classified dataset will be stored. It is possible to save .csv and .xlsx files.')
parser.add_argument(
'model',
help='The path of the classification model.')
parser.add_argument(
'--insheet',
default=0,
help='Sheet name if reading data from Excel file (default is the first sheet).')
parser.add_argument(
'--insep',
default = ',',
help='Column separator for reading CSV files (default is ,).')
parser.add_argument(
'--outsheet',
default='Sheet1',
help='Sheet name if saving as an Excel file (default is Sheet1).')
parser.add_argument(
'--outsep',
default = ',',
help='Column separator for saving CSV files (default is ,).')
class ClassificationApp(object):
def __init__(self, args):
self._args = args
def run(self):
args = self._args
if args.indata == args.outdata:
            print('Indata and outdata point to the same file. This is not '
                  'allowed, to minimize the risk of overwriting the original data.')
sys.exit(0)
check_filename(args.indata)
check_filename(args.outdata)
dataframe = read_dataset(args.indata, args.insep, args.insheet)
clf = load_classifier(args.model)
logger.info('Performing classification on {0} examples.'.format(dataframe.shape[0]))
dataframe = clf.classify(dataframe)
write_dataset(args.outdata, dataframe, args.outsep, args.outsheet)
logger.info('Done!')
if __name__ == '__main__':
app = ClassificationApp(parser.parse_args())
app.run()
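# --- Example invocation (illustrative; the file names below are hypothetical) ---
# From the command line this module would typically be run as, e.g.:
#
#   python -m textclassifier.classify reports.csv reports_classified.csv model.bin --insep ';'
#
# or programmatically, by handing an argument list straight to the parser:
#
#   app = ClassificationApp(parser.parse_args(
#       ['reports.csv', 'reports_classified.csv', 'model.bin', '--insep', ';']))
#   app.run()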
|
gpl-2.0
|
anomam/pvlib-python
|
pvlib/iotools/midc.py
|
4
|
9377
|
"""Functions to read NREL MIDC data.
"""
import io
import requests
import pandas as pd
# MIDC_VARIABLE_MAP maps some variables of interest at each MIDC site to their
# pvlib counterparts. The mapping dictionary for a site can be found by looking
# up the Site's id in the dictionary. It is not a comprehensive list, and may
# not be the best fit for your application, but should serve as a base for
# creating your own mappings.
#
# In particular, these mappings coincide with the raw data files.
# Each site's full field list can be found at:
# https://midcdmz.nrel.gov/apps/daily.pl?site=<SITE ID>&live=1
# where <SITE ID> is the key found in this dictionary.
MIDC_VARIABLE_MAP = {
'BMS': {
'Global CMP22 (vent/cor) [W/m^2]': 'ghi',
'Direct NIP [W/m^2]': 'dni',
'Diffuse CM22-1 (vent/cor) [W/m^2]': 'dhi',
'Avg Wind Speed @ 6ft [m/s]': 'wind_speed',
'Tower Dry Bulb Temp [deg C]': 'temp_air',
'Tower RH [%]': 'relative_humidity'},
'UOSMRL': {
'Global CMP22 [W/m^2]': 'ghi',
'Direct NIP [W/m^2]': 'dni',
'Diffuse Schenk [W/m^2]': 'dhi',
'Air Temperature [deg C]': 'temp_air',
'Relative Humidity [%]': 'relative_humidity',
'Avg Wind Speed @ 10m [m/s]': 'wind_speed'},
'HSU': {
'Global Horiz [W/m^2]': 'ghi',
'Direct Normal (calc) [W/m^2]': 'dni',
'Diffuse Horiz (band_corr) [W/m^2]': 'dhi'},
'UTPASRL': {
'Global Horizontal [W/m^2]': 'ghi',
'Direct Normal [W/m^2]': 'dni',
'Diffuse Horizontal [W/m^2]': 'dhi',
'CHP1 Temp [deg C]': 'temp_air'},
'UAT': {
'Global Horiz (platform) [W/m^2]': 'ghi',
'Direct Normal [W/m^2]': 'dni',
'Diffuse Horiz [W/m^2]': 'dhi',
'Air Temperature [deg C]': 'temp_air',
'Rel Humidity [%]': 'relative_humidity',
'Avg Wind Speed @ 3m [m/s]': 'wind_speed'},
'STAC': {
'Global Horizontal [W/m^2]': 'ghi',
'Direct Normal [W/m^2]': 'dni',
'Diffuse Horizontal [W/m^2]': 'dhi',
'Avg Wind Speed @ 10m [m/s]': 'wind_speed',
'Air Temperature [deg C]': 'temp_air',
'Rel Humidity [%]': 'relative_humidity'},
'UNLV': {
'Global Horiz [W/m^2]': 'ghi',
'Direct Normal [W/m^2]': 'dni',
'Diffuse Horiz (calc) [W/m^2]': 'dhi',
'Dry Bulb Temp [deg C]': 'temp_air',
'Avg Wind Speed @ 30ft [m/s]': 'wind_speed'},
'ORNL': {
'Global Horizontal [W/m^2]': 'ghi',
'Direct Normal [W/m^2]': 'dni',
'Diffuse Horizontal [W/m^2]': 'dhi',
'Air Temperature [deg C]': 'temp_air',
'Rel Humidity [%]': 'relative_humidity',
'Avg Wind Speed @ 42ft [m/s]': 'wind_speed'},
'NELHA': {
'Global Horizontal [W/m^2]': 'ghi',
'Air Temperature [W/m^2]': 'temp_air',
'Avg Wind Speed @ 10m [m/s]': 'wind_speed',
'Rel Humidity [%]': 'relative_humidity'},
'ULL': {
'Global Horizontal [W/m^2]': 'ghi',
'Direct Normal [W/m^2]': 'dni',
'Diffuse Horizontal [W/m^2]': 'dhi',
'Air Temperature [deg C]': 'temp_air',
'Rel Humidity [%]': 'relative_humidity',
'Avg Wind Speed @ 3m [m/s]': 'wind_speed'},
'VTIF': {
'Global Horizontal [W/m^2]': 'ghi',
'Direct Normal [W/m^2]': 'dni',
'Diffuse Horizontal [W/m^2]': 'dhi',
'Air Temperature [deg C]': 'temp_air',
'Avg Wind Speed @ 3m [m/s]': 'wind_speed',
'Rel Humidity [%]': 'relative_humidity'},
'NWTC': {
'Global PSP [W/m^2]': 'ghi',
'Temperature @ 2m [deg C]': 'temp_air',
'Avg Wind Speed @ 2m [m/s]': 'wind_speed',
'Relative Humidity [%]': 'relative_humidity'}}
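# --- Illustrative usage (a sketch; the file name below is hypothetical) ---
# A typical way to use the mapping above: pick a site's dictionary and hand it
# to read_midc (defined later in this module) so the resulting columns follow
# pvlib naming conventions.
def _example_read_bms(filename='BMS_daily_export.csv'):
    data = read_midc(filename, variable_map=MIDC_VARIABLE_MAP['BMS'])
    return data[['ghi', 'dni', 'dhi', 'temp_air']]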
# Maps problematic timezones to 'Etc/GMT' for parsing.
TZ_MAP = {
'PST': 'Etc/GMT+8',
'CST': 'Etc/GMT+6',
}
def format_index(data):
"""Create DatetimeIndex for the Dataframe localized to the timezone provided
as the label of the second (time) column.
Parameters
----------
data: Dataframe
Must contain 'DATE (MM/DD/YYYY)' column, second column must be labeled
with the timezone and contain times in 'HH:MM' format.
Returns
-------
data: Dataframe
Dataframe with DatetimeIndex localized to the provided timezone.
"""
tz_raw = data.columns[1]
timezone = TZ_MAP.get(tz_raw, tz_raw)
datetime = data['DATE (MM/DD/YYYY)'] + data[tz_raw]
datetime = pd.to_datetime(datetime, format='%m/%d/%Y%H:%M')
data = data.set_index(datetime)
data = data.tz_localize(timezone)
return data
def format_index_raw(data):
"""Create DatetimeIndex for the Dataframe localized to the timezone provided
as the label of the third column.
Parameters
----------
data: Dataframe
Must contain columns 'Year' and 'DOY'. Timezone must be found as the
label of the third (time) column.
Returns
-------
data: Dataframe
The data with a Datetime index localized to the provided timezone.
"""
tz_raw = data.columns[3]
timezone = TZ_MAP.get(tz_raw, tz_raw)
year = data.Year.apply(str)
jday = data.DOY.apply(lambda x: '{:03d}'.format(x))
time = data[tz_raw].apply(lambda x: '{:04d}'.format(x))
index = pd.to_datetime(year + jday + time, format="%Y%j%H%M")
data = data.set_index(index)
data = data.tz_localize(timezone)
return data
def read_midc(filename, variable_map={}, raw_data=False, **kwargs):
"""Read in National Renewable Energy Laboratory Measurement and
Instrumentation Data Center weather data. The MIDC is described in [1]_.
Parameters
----------
filename: string or file-like object
Filename, url, or file-like object of data to read.
variable_map: dictionary
Dictionary for mapping MIDC field names to pvlib names. Used to rename
the columns of the resulting DataFrame. Does not map names by default.
See Notes for an example.
raw_data: boolean
Set to true to use format_index_raw to correctly format the date/time
columns of MIDC raw data files.
kwargs : dict
Additional keyword arguments to pass to `pandas.read_csv`
Returns
-------
data: Dataframe
A dataframe with DatetimeIndex localized to the provided timezone.
Notes
-----
The `variable_map` argument should map fields from MIDC data to pvlib
names.
E.g. if a MIDC file contains the variable 'Global Horizontal [W/m^2]',
passing the dictionary below will rename the column to 'ghi' in
the returned Dataframe.
{'Global Horizontal [W/m^2]': 'ghi'}
    See the MIDC_VARIABLE_MAP for a collection of mappings by site.
For a full list of pvlib variable names see the `Variable Style Rules
<https://pvlib-python.readthedocs.io/en/latest/variables_style_rules.html>`_.
Be sure to check the units for the variables you will use on the
`MIDC site <https://midcdmz.nrel.gov/>`_.
References
----------
.. [1] NREL: Measurement and Instrumentation Data Center
`https://midcdmz.nrel.gov/ <https://midcdmz.nrel.gov/>`_
"""
data = pd.read_csv(filename, **kwargs)
if raw_data:
data = format_index_raw(data)
else:
data = format_index(data)
data = data.rename(columns=variable_map)
return data
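# Minimal sketch (the CSV text is invented, not real MIDC output) of how
# ``variable_map`` renames a raw MIDC field to the pvlib name 'ghi'; it relies
# on the module-level ``io`` and ``pd`` imports and the TZ_MAP entry for 'PST'.
def _read_midc_example():
    csv_text = ("DATE (MM/DD/YYYY),PST,Global Horizontal [W/m^2]\n"
                "01/01/2019,00:00,0\n"
                "01/01/2019,00:01,0\n")
    return read_midc(io.StringIO(csv_text),
                     variable_map={'Global Horizontal [W/m^2]': 'ghi'})
    # The result has a tz-aware DatetimeIndex (Etc/GMT+8) and a 'ghi' column.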
def read_midc_raw_data_from_nrel(site, start, end, variable_map={},
timeout=30):
"""Request and read MIDC data directly from the raw data api.
Parameters
----------
site: string
The MIDC station id.
start: datetime
Start date for requested data.
end: datetime
End date for requested data.
variable_map: dict
A dictionary mapping MIDC field names to pvlib names. Used to
rename columns of the resulting DataFrame. See Notes of
:py:func:`pvlib.iotools.read_midc` for example.
timeout : float, default 30
Number of seconds to wait to connect/read from the API before
failing.
Returns
-------
data:
Dataframe with DatetimeIndex localized to the station location.
Raises
------
requests.HTTPError
For any error in retrieving the CSV file from the MIDC API
requests.Timeout
        If data is not received within ``timeout`` seconds
Notes
-----
Requests spanning an instrumentation change will yield an error. See the
MIDC raw data api page
`here <https://midcdmz.nrel.gov/apps/data_api_doc.pl?_idtextlist>`_
for more details and considerations.
"""
args = {'site': site,
'begin': start.strftime('%Y%m%d'),
'end': end.strftime('%Y%m%d')}
url = 'https://midcdmz.nrel.gov/apps/data_api.pl'
# NOTE: just use requests.get(url, params=args) to build querystring
# number of header columns and data columns do not always match,
# so first parse the header to determine the number of data columns
# to parse
csv_request = requests.get(url, timeout=timeout, params=args)
csv_request.raise_for_status()
raw_csv = io.StringIO(csv_request.text)
first_row = pd.read_csv(raw_csv, nrows=0)
col_length = len(first_row.columns)
raw_csv.seek(0)
return read_midc(raw_csv, variable_map=variable_map, raw_data=True,
usecols=range(col_length))
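# Sketch of the querystring that the NOTE above refers to; no request is sent
# here, the station id and dates are illustrative only, and the module-level
# ``requests`` import is assumed.
def _example_raw_data_url():
    args = {'site': 'UAT', 'begin': '20190101', 'end': '20190102'}
    url = 'https://midcdmz.nrel.gov/apps/data_api.pl'
    return requests.Request('GET', url, params=args).prepare().url
    # -> 'https://midcdmz.nrel.gov/apps/data_api.pl?site=UAT&begin=20190101&end=20190102'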
|
bsd-3-clause
|
lucasdavid/Manifold-Learning
|
experiments/reducing/iso_landmark.py
|
1
|
1867
|
import time
from sklearn import datasets
from experiments.base import ReductionExperiment
class LIsomapExperiment(ReductionExperiment):
title = 'L-Isomap Experiment'
plotting = True
benchmarks = (
{'method': 'skisomap', 'samples': 1000, 'params': {'n_neighbors': 10, 'n_components': 2}},
{'method': 'lisomap', 'samples': 1000, 'params': {'n_neighbors': 10, 'n_components': 2}},
{'method': 'skisomap', 'samples': 4000, 'params': {'n_neighbors': 10, 'n_components': 2}},
{'method': 'lisomap', 'samples': 4000, 'params': {'n_neighbors': 10, 'n_components': 2}},
# {'method': 'skisomap', 'samples': 10000, 'params': {'n_neighbors': 10, 'n_components': 2}},
{'method': 'lisomap', 'samples': 10000, 'params': {'n_neighbors': 10, 'n_components': 2}},
{'method': 'lisomap', 'samples': 30000, 'params': {'n_neighbors': 10, 'n_components': 2}},
)
def generate_data(self, samples):
self.data, self.target = datasets.make_swiss_roll(n_samples=samples, random_state=0)
self.original_data = self.data
if self.plotting:
self.displayer.load(self.data, self.target)
print('Data set size: %.2fKB' % (self.data.nbytes / 1024))
print('Shape: %s' % str(self.data.shape))
def _run(self):
start = time.time()
try:
for benchmark in self.benchmarks:
self.generate_data(benchmark['samples'])
self.reduction_method = benchmark['method']
self.reduction_params = benchmark['params']
self.reduce()
except KeyboardInterrupt:
print('cancelled.', end=' ')
print('Time elapsed: %.2f sec.' % (time.time() - start))
if self.plotting:
self.displayer.show()
if __name__ == '__main__':
LIsomapExperiment().start()
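# Standalone sketch (assumes only scikit-learn): roughly what a single
# 'skisomap' benchmark entry above amounts to, without the ReductionExperiment
# plumbing defined elsewhere in this project.
def sklearn_isomap_baseline(samples=1000, n_neighbors=10, n_components=2):
    from sklearn.manifold import Isomap
    data, _ = datasets.make_swiss_roll(n_samples=samples, random_state=0)
    start = time.time()
    embedding = Isomap(n_neighbors=n_neighbors,
                       n_components=n_components).fit_transform(data)
    print('Reduced %s -> %s in %.2fs'
          % (data.shape, embedding.shape, time.time() - start))
    return embedding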
|
mit
|
Sohojoe/damon
|
damon1/read_winsteps.py
|
1
|
8371
|
# -*- coding: utf-8 -*-
# opts.py
"""template.py
Template for writing Damon programs.
Copyright (c) 2009 - 2011, [Developer Name] for [Company Name].
Purpose:
Damon Version:
Python Version:
Numpy Version:
License
-------
This program references one or more software modules that are
under copyright to Pythias Consulting, LLC. Therefore, it is subject
to either the Gnu Affero General Public License or the Pythias
Commercial License, a copy of which is contained in the current
working directory.
How To Use
----------
You can run Damon from the prompt interactively on IDLE, or
you can run it from scripts. This template is for writing Damon
scripts. It doesn't contain much, but saves the time of
writing the necessary import statements. It also contains
a handy lookup reference of the various methods you will need.
Open the template from the Python shell menu by selecting
file/Open Module. type_: damon1.template
Save the template under a different name to a directory of
your choice. (If you forget this step, you will overwrite
the template and have to create a new one.)
Write code and run using the F5 key.
If you use the template to define functions, you can run
the functions from this module using the "if __name__ == "__main__":
trick at the bottom of the module. (See Python docs.)
A tutorial can be found on the Pythias website. Most of the
documentation is found at the level of individual functions
and methods and can be accessed using help().
Top-level documentation can be accessed using:
>>> import damon1
>>> help(damon1)
That will get you started.
Cheatsheet of Damon Methods
---------------------------
In (approximate) order of application:
d = create_data()['data'] => Create artificial Damon objects
d = TopDamon() => Create a Damon object from an existing dataset
d = Damon(data,'array',...) => More generic low-level way to create a Damon object
d.merge_info() => Merge row or column info into labels
d.extract_valid() => Extract only valid rows/cols
d.pseudomiss() => Create index of pseudo-missing cells
d.score_mc() => Score multiple-choice data
d.subscale() => Append raw scores for item subscales
d.parse() => Parse response options to separate columns
d.standardize() => Convert all columns into a standard metric
d.rasch() => Rasch-analyze data (in place of coord())
d.coord() => Calculate row and column coordinates
d.sub_coord() => Calculate coordinates given multiple subspaces (in place of coord)
d.objectify() => Maximize objectivity of specified columns (in place of coord)
d.base_est() => Calculate cell estimates
d.base_resid() => Get residuals (observation - estimate)
d.base_ear() => Get expected absolute residuals
d.base_se() => Get standard errors for all cells
d.equate() => Equate two datasets using a bank
d.base_fit() => Get cell fit statistics
d.fin_est() => Get final estimates, original metric
d.est2logit() => Convert estimates to logits
d.item_diff() => Get probability-based item difficulties
d.fillmiss() => Fill missing cells of original dataset
d.fin_resid() => Get final cell residuals, original metric
d.fin_fit() => Get final cell fit, original metric
d.restore_invalid() => Restores invalid rows/cols to output arrays
d.summstat() => Get summary row/column/range statistics
d.bank() => Save row/column coordinates in "bank" file
d.export() => Export specified outputs as files
"""
import os
import sys
import numpy as np
import numpy.random as npr
import numpy.linalg as npla
import numpy.ma as npma
np.seterr(all='ignore')
try:
import matplotlib.pyplot as plt
except ImportError:
pass
import damon1 as damon1
import damon1.core as dmn
import damon1.tools as dmnt
import csv
def read_winsteps(data):
"""Convert Winsteps control file in Damon object
Returns
-------
{'data':Damon object,
'anskey':answer key
}
Comments
--------
This function was a quick and dirty effort to
read a Winsteps control file for a particular case.
It probably won't work on your files without some
editing. Save a copy and edit it to fit your situation.
Arguments
---------
"data" is a path name to a Winsteps control file that
contains both specifications and data.
"""
clean_lines = []
# Get clean list of lines, capturing some variables
with open(data, 'rb') as f:
lines = f.readlines()
for i, line in enumerate(lines):
line = line.replace('"',"").strip()
clean_lines.append(line)
if 'Item1' in line:
start_resp = int(line[line.find('Item1') + 6:]) - 1
if 'Name1' in line:
start_name = int(line[line.find('Name1') + 6:]) - 1
if 'Codes' in line:
validchars_ = line[line.find('Codes') + 6:]
if 'Key' in line:
key = line[line.find('Key') + 4:]
if '&END' in line:
start_items = i + 1
if 'END NAMES' in line:
stop_items = i
start_data = i + 1
    # Get variables
items = clean_lines[start_items:stop_items]
validchars = ['All', list(validchars_)]
anskey = dict(zip(items, list(key)))
data_lines = clean_lines[start_data:]
persons = []
person_resps = []
nitems = len(items)
# Read the data file, parse out persons
for line in data_lines:
x = line[start_name:start_resp].strip()
person = x.replace(' ', '') # Remove gaps in person ids (temp)
persons.append(person)
resps = list(line[start_resp:start_resp + nitems])
person_resps.append(resps)
# Convert into arrays
persons.insert(0, 'id')
items.insert(0, 'id')
rowlabels = np.array(persons)[:, np.newaxis]
collabels = np.array(items)[np.newaxis, :]
coredata = np.array(person_resps)
# Build datadict for Damon
datadict = {'rowlabels':rowlabels,
'collabels':collabels,
'coredata':coredata,
'nheaders4rows':1,
'key4rows':0,
'rowkeytype':'S60',
'nheaders4cols':1,
'key4cols':0,
'colkeytype':'S60',
'validchars':validchars,
'nanval':'-999',
}
d = dmn.Damon(datadict, 'datadict', verbose=True)
return {'data':d,
'anskey':anskey}
##############
## Run ##
## Module ##
##############
# To run functions that are defined in this module
if __name__ == "__main__":
workfile = 'Mark_Medical_Con.txt'
a = read_winsteps(workfile)
d = a['data']
ak = a['anskey']
d.score_mc(['Cols', ak])
print 'd.score_mc_out=\n', d.score_mc_out['coredata']
d.standardize()
d.coord([[3]])
d.base_est()
d.base_resid()
d.base_ear()
d.base_se()
d.base_fit()
d.est2logit()
d.summstat('base_est_out', ['Mean', 'SD', 'SE', 'Rel',
'Fit_MeanSq', 'Fit_Perc>2',
'Count', 'Min', 'Max'],
getrows='SummWhole')
d.export(['row_ents_out', 'col_ents_out'], outprefix='Medical')
## dim = d.objperdim.core_col['Dim']
## acc = d.objperdim.core_col['Acc']
## stab = d.objperdim.core_col['Stab']
## obj = d.objperdim.core_col['Obj']
##
## plt.plot(dim, acc, 'r-', label='Accuracy')
## plt.plot(dim, stab, 'b-', label='Stability')
## plt.plot(dim, obj, 'k-', label='Objectivity')
## plt.xlabel('Dimensionality')
## plt.ylabel('Objectivity Stats')
## plt.legend()
## plt.savefig('read_winsteps.png')
## plt.clf()
#d.export(['data_out'], outsuffix = '.txt', delimiter='\t')
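# Sketch (an invented two-line control fragment, not a real Winsteps file):
# how the 'Item1' and 'Codes' offsets are pulled out by the parsing loop in
# read_winsteps above. The real function also slices out person names and
# responses, which this toy example omits.
def _parse_control_fragment():
    fragment = ['Item1=11', 'Codes=ABCD', '&END']
    spec = {}
    for line in fragment:
        if 'Item1' in line:
            spec['start_resp'] = int(line[line.find('Item1') + 6:]) - 1
        if 'Codes' in line:
            spec['validchars'] = ['All', list(line[line.find('Codes') + 6:])]
    return spec  # {'start_resp': 10, 'validchars': ['All', ['A', 'B', 'C', 'D']]}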
|
apache-2.0
|
lenovor/scikit-learn
|
sklearn/decomposition/nmf.py
|
100
|
19059
|
""" Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted, check_non_negative
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
    n_components : int
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
random_state = check_random_state(random_state)
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
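# Illustrative sketch (not part of scikit-learn): NNDSVD returns non-negative
# factors whose product is already a rough approximation of a non-negative X,
# which is what makes it a useful warm start for the solvers below.
def _nndsvd_example():
    rng = check_random_state(0)
    X = np.abs(rng.randn(6, 5))
    W, H = _initialize_nmf(X, 3, random_state=0)
    assert (W >= 0).all() and (H >= 0).all()
    return norm(X - np.dot(W, H))  # typically well below norm(X - X.mean())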
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
        Larger values make it possible to find a better step size but lead to
        a longer line search. 0.1 is a commonly used value in the optimization
        literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
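# Illustrative sketch (not part of scikit-learn), mirroring the shape of the
# unit tests for this module: with V = A'A and W = A', the exact minimizer of
# ||WH - V|| is H = A, and the projected gradient solver recovers it closely.
def _nls_example():
    A = np.arange(1, 5, dtype=np.float64).reshape(1, -1)
    H, _, _ = _nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
                              tol=0.001, max_iter=100)
    return np.abs(H - A).max()  # close to zero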
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
rng = check_random_state(self.random_state)
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_, random_state=rng)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a',
random_state=rng)
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar',
random_state=rng)
elif init == "random":
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
|
bsd-3-clause
|
plumed/plumed2
|
python/test/test_pandas.py
|
3
|
2641
|
import plumed
import filecmp
import os
from contextlib import contextmanager
@contextmanager
def cd(newdir):
prevdir = os.getcwd()
os.chdir(newdir)
try:
yield
finally:
os.chdir(prevdir)
def test1():
with cd('test/'):
d=plumed.read_as_pandas("COLVAR",enable_constants='columns')
print(d,file=open("dataframe","wt"))
assert filecmp.cmp("dataframe","dataframe.ref")
def test2():
with cd('test/'):
d=plumed.read_as_pandas("COLVAR_gzipped.gz",enable_constants='columns')
print(d,file=open("dataframe","wt"))
assert filecmp.cmp("dataframe","dataframe.ref")
def test3():
with cd('test/'):
i=0
for d in plumed.read_as_pandas("COLVAR",chunksize=4,enable_constants='columns'):
print(d,file=open("dataframe."+str(i),"wt"))
assert filecmp.cmp("dataframe."+str(i),"dataframe."+str(i)+".ref")
i=i+1
def test4():
with cd('test/'):
i=0
for d in plumed.read_as_pandas("COLVAR_gzipped.gz",chunksize=4,enable_constants='columns'):
print(d,file=open("dataframe."+str(i),"wt"))
assert filecmp.cmp("dataframe."+str(i),"dataframe."+str(i)+".ref")
i=i+1
def test5():
with cd('test/'):
d=plumed.read_as_pandas("COLVAR",enable_constants='metadata')
print(d,file=open("dataframe_noconst","wt"))
assert filecmp.cmp("dataframe_noconst","dataframe_noconst.ref")
assert d.plumed_constants[0][0]=="a"
assert d.plumed_constants[0][2]=="pi"
def test6():
with cd('test/'):
i=0
for d in plumed.read_as_pandas("COLVAR",chunksize=4,enable_constants='metadata'):
print(d,file=open("dataframe_noconst."+str(i),"wt"))
assert filecmp.cmp("dataframe_noconst."+str(i),"dataframe_noconst."+str(i)+".ref")
assert d.plumed_constants[0][0]=="a"
assert d.plumed_constants[0][2]=="pi"
i=i+1
def test7():
with cd('test/'):
d=plumed.read_as_pandas("COLVAR")
plumed.write_pandas(d,"COLVAR_write1")
assert filecmp.cmp("COLVAR_write1","COLVAR_write.ref")
d=plumed.read_as_pandas("COLVAR",index_col='time')
plumed.write_pandas(d,"COLVAR_write2")
assert filecmp.cmp("COLVAR_write2","COLVAR_write.ref")
d=plumed.read_as_pandas("COLVAR",index_col=('time','psi'))
try:
plumed.write_pandas(d,"COLVAR_write3")
assert False
except TypeError:
pass
if __name__ == "__main__":
test1()
test2()
test3()
test4()
test5()
test6()
test7()
|
lgpl-3.0
|
Barmaley-exe/scikit-learn
|
examples/ensemble/plot_gradient_boosting_regression.py
|
227
|
2520
|
"""
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
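###############################################################################
# Aside (a sketch, not part of the original example): staged_predict gives the
# same per-iteration picture in terms of test MSE rather than deviance.
staged_mse = [mean_squared_error(y_test, y_pred)
              for y_pred in clf.staged_predict(X_test)]
print("Test MSE after first/last stage: %.4f / %.4f"
      % (staged_mse[0], staged_mse[-1]))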
|
bsd-3-clause
|
appapantula/scikit-learn
|
sklearn/decomposition/tests/test_nmf.py
|
130
|
6059
|
import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
W, H = nmf._initialize_nmf(data, 10, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF(random_state=42)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
|
bsd-3-clause
|
lucventurini/mikado
|
Mikado/subprograms/util/collapse_compare_stats.py
|
1
|
9927
|
#!/usr/bin/env python3
import argparse
import collections
import sys
import tabulate
import pandas as pd
import re
__doc__ = "Script to collapse various stat files into one."
# 1 Command line:
# 2 /home/lucve/miniconda3/envs/mikado2/bin/mikado compare -r reference.gff3 -p Daijin/5-mikado/pick/permissive/mikado-permissive.loci.gff3 -o compare -l compare.log
# 3 18 reference RNAs in 12 genes
# 4 22 predicted RNAs in 15 genes
# 5 --------------------------------- | Sn | Pr | F1 |
# 6 Base level: 94.90 83.22 88.68
# 7 Exon level (stringent): 80.56 71.60 75.82
# 8 Exon level (lenient): 91.18 76.54 83.22
# 9 Splice site level: 95.19 81.15 87.61
# 10 Intron level: 96.84 88.19 92.31
# 11 Intron level (NR): 94.34 79.37 86.21
# 12 Intron chain level: 69.23 50.00 58.06
# 13 Intron chain level (NR): 69.23 50.00 58.06
# 14 Transcript level (stringent): 55.56 45.45 50.00
# 15 Transcript level (>=95% base F1): 72.22 59.09 65.00
# 16 Transcript level (>=80% base F1): 72.22 59.09 65.00
# 17 Gene level (100% base F1): 75.00 60.00 66.67
# 18 Gene level (>=95% base F1): 83.33 66.67 74.07
# 19 Gene level (>=80% base F1): 83.33 66.67 74.07
# 20
# 21 # Matching: in prediction; matched: in reference.
# 22
# 23 Matching intron chains: 9
# 24 Matched intron chains: 9
# 25 Matching monoexonic transcripts: 4
# 26 Matched monoexonic transcripts: 4
# 27 Total matching transcripts: 13
# 28 Total matched transcripts: 13
# 29
# 30 Missed exons (stringent): 14/72 (19.44%)
# 31 Novel exons (stringent): 23/81 (28.40%)
# 32 Missed exons (lenient): 6/68 (8.82%)
# 33 Novel exons (lenient): 19/81 (23.46%)
# 34 Missed introns: 3/53 (5.66%)
# 35 Novel introns: 13/63 (20.63%)
# 36
# 37 Missed transcripts (0% nF1): 0/18 (0.00%)
# 38 Novel transcripts (0% nF1): 3/22 (13.64%)
# 39 Missed genes (0% nF1): 0/12 (0.00%)
# 40 Novel genes (0% nF1): 3/15 (20.00%)
digit = r"[0-9]*\.?[0-9]+"
accuracy_pat = re.compile(r"^\s+(.*):\s+({digit})\s+({digit})\s+({digit})\s+$".format(digit=digit))
match_pat = re.compile(r"\s+(.*): (\d+)")
missed_novel_pat = re.compile(r"\s+(.*): (\d+)/(\d+)\s+\(({digit})%\)".format(digit=digit))
accuracy_stats = {
6: "Base level", 7: "Exon level (stringent)", 8: "Exon level (lenient)", 9: "Splice site level",
10: "Intron level", 11: "Intron level (NR)", 12: "Intron chain level", 13: "Intron chain level (NR)",
14: "Transcript level (stringent)", 15: "Transcript level (>=95% base F1)",
16: "Transcript level (>=80% base F1)", 17: "Gene level (100% base F1)", 18: "Gene level (>=95% base F1)",
19: "Gene level (>=80% base F1)"
}
match_stats = {
23: "Matching intron chains", 24: "Matched intron chains",
25: "Matching monoexonic transcripts", 26: "Matched monoexonic transcripts",
27: "Total matching transcripts", 28: "Total matched transcripts"
}
novel_missed_stats = {
30: "Missed exons (stringent)", 31: "Novel exons (stringent)", 32: "Missed exons (lenient)",
33: "Novel exons (lenient)", 34: "Missed introns", 35: "Novel introns",
37: "Missed transcripts (0% nF1)", 38: "Novel transcripts (0% nF1)",
39: "Missed genes (0% nF1)", 40: "Novel genes (0% nF1)"
}
def accuracy_retrieval(line: str, requested: str) -> (float, float, float):
level, sn, pr, f1 = accuracy_pat.search(line).groups()
assert level == requested, (line, requested)
sn, pr, f1 = [float(val) for val in (sn, pr, f1)]
assert 0 <= min([sn, pr, f1]) <= max([sn, pr, f1]) <= 100
return sn, pr, f1
def match_retrieval(line: str, requested: str) -> int:
level, match = match_pat.search(line).groups()
assert requested == level, (line, requested)
match = int(match)
assert match >= 0
return match
def missed_novel_retrieval(line: str, requested: str) -> (int, int, float):
level, found, maximum, proportion = missed_novel_pat.search(line).groups()
assert level == requested, (line, requested)
found, maximum = int(found), int(maximum)
proportion = float(proportion)
assert 0 <= proportion <= 100
proportion = f"{proportion}%"
return found, maximum, proportion
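def _parsing_example():
    """Sketch (hand-written lines echoing the numbers in the comment block
    above, not read from a real stats file): how the regular expressions pull
    values out of two representative lines."""
    acc = accuracy_retrieval("    Base level:  94.90  83.22  88.68  \n",
                             "Base level")
    mn = missed_novel_retrieval("  Missed exons (stringent): 14/72 (19.44%)\n",
                                "Missed exons (stringent)")
    return acc, mn  # ((94.9, 83.22, 88.68), (14, 72, '19.44%'))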
def launch(args):
if args.avf is True:
print("Available formats for this script:")
print(*tabulate.tabulate_formats, sep="\n")
sys.exit(0)
data = {
"Sn": dict((key, collections.defaultdict(dict)) for counter, key in sorted(accuracy_stats.items())),
"Pr": dict((key, collections.defaultdict(dict)) for counter, key in sorted(accuracy_stats.items())),
"F1": dict((key, collections.defaultdict(dict)) for counter, key in sorted(accuracy_stats.items())),
"Matches": dict((key, collections.defaultdict(dict)) for counter, key in sorted(match_stats.items())),
"MNR":{
"Found": dict((key, collections.defaultdict(dict)) for counter, key
in sorted(novel_missed_stats.items())),
"Maximum": dict((key, collections.defaultdict(dict)) for counter, key
in sorted(novel_missed_stats.items())),
"Proportion": dict((key, collections.defaultdict(dict)) for counter, key in
sorted(novel_missed_stats.items()))
}
}
for stat in args.stat:
for line_counter, line in enumerate(stat, start=1):
if line_counter in accuracy_stats:
level = accuracy_stats[line_counter]
sn, pr, f1 = accuracy_retrieval(line, level)
data["Sn"][level][stat.name] = sn
data["Pr"][level][stat.name] = pr
data["F1"][level][stat.name] = f1
elif line_counter in match_stats:
level = match_stats[line_counter]
match = match_retrieval(line, level)
data["Matches"][level][stat.name] = match
continue
elif line_counter in novel_missed_stats:
level = novel_missed_stats[line_counter]
found, maximum, proportion = missed_novel_retrieval(line, level)
if stat.name not in data["MNR"]:
data["MNR"][stat.name] = collections.defaultdict(dict)
data["MNR"][stat.name][level]["Total"] = found
data["MNR"][stat.name][level]["Out of"] = maximum
data["MNR"][stat.name][level]["Proportion"] = proportion
continue
else:
continue
for tablefmt in args.tablefmt:
if tablefmt in ("tsv", "html", "csv", "rst"):
ext = tablefmt
elif "latex" in tablefmt:
ext = f"{tablefmt}.tex"
else:
ext = f"{tablefmt}.txt"
for key in ["Sn", "Pr", "F1", "Matches"]:
if not ("all" in args.levels or key.lower() in args.levels):
continue
if key == "Sn":
name = "Sensitivity"
elif key == "Pr":
name = "Precision"
elif key in ("F1", "Matches"):
name = key[:]
else:
continue
lname = name.lower()
with open(f"{args.out}.{lname}.{ext}", "wt") as out:
accuracy = pd.DataFrame.from_dict(dict(**data[key]), orient="index")
accuracy.index.name = f"{name}"
print(tabulate.tabulate(accuracy, showindex="always", headers="keys", tablefmt=tablefmt),
file=out)
# Now the last table
if not ("all" in args.levels or "missed_novel" in args.levels):
continue
dfs = []
for fname, fdata in data["MNR"].items():
df = pd.DataFrame.from_dict(fdata, orient="index")
df.columns.names = [fname]
dfs.append(df)
df = pd.concat(dfs, keys=[df.columns.names[0] for df in dfs], axis=1)
df.columns.names = ["File", "Statistic"]
h = [df.columns.names[0] + "\n" + df.columns.names[1]] + [
"\n".join([fname, stat]) if num % 3 == 1 else "\n" + stat for num, (fname, stat) in
enumerate(df.columns.tolist())]
with open(f"{args.out}.missed_novel.{ext}", "wt") as out:
print(tabulate.tabulate(df, showindex="always", headers=h, tablefmt=tablefmt), file=out)
def parser():
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("-o", "--out", default="mikado_compare_selected_stats",
help="Prefix for the output files")
parser.add_argument("-fmt", "--format", nargs="+", choices=tabulate.tabulate_formats,
dest="tablefmt", default=["grid"], metavar="",
help="List of formats to print the tables into, separated by a space. Available formats can be "
"seen using --available-formats. "
"Terminate the list with --. Default: %(default)s")
parser.add_argument("-l", "--levels", nargs="+", choices=["all", "f1", "sn", "pr", "matches", "missed_novel"],
default=["f1", "sn", "pr"],
help="Levels to print, separated by a space. Terminate the list with --. Default: %(default)s.")
parser.add_argument("-avf", "--available-formats", action="store_true", default=False, dest="avf",
help="Print out a list of available formats and exit.")
parser.add_argument("stat", type=argparse.FileType("rt"),
nargs="*")
parser.set_defaults(func=launch)
return parser
if __name__ == "__main__":
args = parser()
launch(args)
|
lgpl-3.0
|
devanshdalal/scikit-learn
|
examples/cluster/plot_cluster_iris.py
|
350
|
2593
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the number of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
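# Aside (a sketch, not part of the original example): the effect of a poor
# initialization can also be read off the final inertia; a single random
# start (n_init=1) may settle in a worse local optimum than the default 10.
multi_start = KMeans(n_clusters=3, n_init=10, random_state=5).fit(X)
single_start = KMeans(n_clusters=3, n_init=1, init='random',
                      random_state=5).fit(X)
print('inertia with n_init=10: %.2f, with n_init=1: %.2f'
      % (multi_start.inertia_, single_start.inertia_))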
|
bsd-3-clause
|
Kongsea/tensorflow
|
tensorflow/contrib/timeseries/examples/predict.py
|
69
|
5579
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example of training and predicting with a TFTS estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
FLAGS = None
def structural_ensemble_train_and_predict(csv_file_name):
# Cycle between 5 latent values over a period of 100. This leads to a very
# smooth periodic component (and a small model), which is a good fit for our
# example data. Modeling high-frequency periodic variations will require a
# higher cycle_num_latent_values.
structural = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=100, num_features=1, cycle_num_latent_values=5)
return train_and_predict(structural, csv_file_name, training_steps=150)
def ar_train_and_predict(csv_file_name):
# An autoregressive model, with periodicity handled as a time-based
# regression. Note that this requires windows of size 16 (input_window_size +
# output_window_size) for training.
ar = tf.contrib.timeseries.ARRegressor(
periodicities=100, input_window_size=10, output_window_size=6,
num_features=1,
# Use the (default) normal likelihood loss to adaptively fit the
# variance. SQUARED_LOSS overestimates variance when there are trends in
# the series.
loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)
return train_and_predict(ar, csv_file_name, training_steps=600)
def train_and_predict(estimator, csv_file_name, training_steps):
"""A simple example of training and predicting."""
# Read data in the default "time,value" CSV format with no header
reader = tf.contrib.timeseries.CSVReader(csv_file_name)
# Set up windowing and batching for training
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=16, window_size=16)
# Fit model parameters to data
estimator.train(input_fn=train_input_fn, steps=training_steps)
# Evaluate on the full dataset sequentially, collecting in-sample predictions
# for a qualitative evaluation. Note that this loads the whole dataset into
# memory. For quantitative evaluation, use RandomWindowChunker.
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=200)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
return times, observed, all_times, mean, upper_limit, lower_limit
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit):
"""Plot a time series in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.plot(all_times, upper_limit, "g", label="forecast upper bound")
pyplot.plot(all_times, lower_limit, "g", label="forecast lower bound")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
alpha="0.2")
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Structural ensemble",
*structural_ensemble_train_and_predict(FLAGS.input_filename))
make_plot("AR", *ar_train_and_predict(FLAGS.input_filename))
pyplot.show()
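def _write_synthetic_csv(path="synthetic.csv", num_points=1000):
  """Sketch (not part of the original example): write a headerless
  'time,value' CSV of the kind train_and_predict expects, with a period-100
  component matching the periodicities configured above. The file name and
  point count are arbitrary."""
  steps = np.arange(num_points)
  values = np.sin(2 * np.pi * steps / 100.) + 0.1 * np.random.randn(num_points)
  np.savetxt(path, np.column_stack([steps, values]), delimiter=",",
             fmt=["%d", "%.6f"])
  return path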
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_filename",
type=str,
required=True,
help="Input csv file.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
apache-2.0
|
bibsian/database-development
|
test/manual_test_dialogsession.py
|
1
|
5780
|
#!/usr/bin/env python
import pytest
import pytestqt
from pandas import to_numeric
from PyQt4 import QtCore, QtGui, QtWebKit
import sys,os
from Views import ui_mainrefactor as mw
from Views import ui_dialog_session as dsess
from poplerGUI import class_inputhandler as ini
from poplerGUI.logiclayer import class_userfacade as face
from poplerGUI import class_modelviewpandas as view
rootpath = os.path.dirname(os.path.dirname( __file__ ))
end = os.path.sep
os.chdir(rootpath)
@pytest.fixture
def MainWindow():
class SessionDialog(QtGui.QDialog, dsess.Ui_Dialog):
'''
        Dialog box that prompts the user to input
        unique metadata relating to the file that
        will be loaded, and to select and load the file into
        the raw data viewer.
'''
raw_data_model = QtCore.pyqtSignal(object)
webview_url = QtCore.pyqtSignal(object)
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
# Attributes
self.metaini = None
self.fileini = None
self.verify = None
# User facade composed from main window
self.facade = None
# Signal
self.btnVerifyMeta.clicked.connect(self.meta_handler)
self.btnSelectFile.clicked.connect(self.file_handler)
self.btnCancel.clicked.connect(self.close)
self.btnSaveClose.clicked.connect(self.close)
# Status Message boxes
self.error = QtGui.QErrorMessage()
self.message = QtGui.QMessageBox
def meta_handler(self):
'''
Method to pass the user input about metadata to the mainwindow
(where the facade class is instantiated).
'''
entries = {
'globalid': int(self.lnedGlobalId.text().strip()),
'metaurl': self.lnedMetadataUrl.text().strip(),
'lter': self.cboxLTERloc.currentText().strip()
}
self.metaini = ini.InputHandler(
name='metacheck', tablename=None,
lnedentry=entries, verify=self.verify)
self.facade.input_register(self.metaini)
try:
print(self.metaini.lnedentry['metaurl'])
self.facade.meta_verify()
self.webview_url.emit(
self.metaini.lnedentry['metaurl'])
self.message.about(self, 'Status', 'Entries recorded')
except Exception as e:
print(str(e))
self.error.showMessage('Invalid entries: ' + str(e))
raise LookupError('Invalid metadata entries')
def file_handler(self):
'''
Method to pass the user input about the file to load
'''
lned = {
'sheet': self.lnedExcelSheet.text().strip(),
'delim': self.lnedDelimiter.text().strip(),
'tskip': self.lnedSkipTop.text().strip(),
'bskip': self.lnedSkipBottom.text().strip(),
'header': ''
}
rbtn = {
'csv': self.rbtnCsv.isChecked(),
'xlsx': self.rbtnExcel.isChecked(),
'txt': self.rbtnTxt.isChecked()
}
name = QtGui.QFileDialog.getOpenFileName(
self, 'Select File')
headers = self.ckHeader.isChecked()
self.fileini = ini.InputHandler(
name='fileoptions', tablename=None,
rbtns=rbtn, lnedentry=lned, filename=name,
checks=headers
)
self.facade.input_register(self.fileini)
try:
self.facade.load_data()
self.raw_data_model.emit('loaded_data')
except Exception as e:
self.filetypeReceive.emit(str(e))
@QtCore.pyqtSlot(object)
def info_updates(self, message):
self.message.about(self, 'Status', message)
class UiMainWindow(QtGui.QMainWindow, mw.Ui_MainWindow):
'''
        The main window class will serve to gather all information
        from Dialog boxes and actions, and to instantiate the classes
        that are required to perform the necessary lower level logic
        (i.e. implement a Facade, Commander, MetaVerifier, etc.).
'''
raw_data_received = QtCore.pyqtSignal(object)
def __init__(self, parent=None):
super().__init__(parent)
# attributes
self.setupUi(self)
self.facade = face.Facade()
self.dsession = SessionDialog()
# Dialog boxes for user feedback
self.error = QtGui.QErrorMessage()
self.message = QtGui.QMessageBox
# Custom signals
self.dsession.raw_data_model.connect(
self.update_data_model)
# actions
self.actionStart_Session.triggered.connect(
self.session_display)
self.mdiArea.addSubWindow(self.subwindow_2)
self.mdiArea.addSubWindow(self.subwindow_1)
@QtCore.pyqtSlot(object)
def update_data_model(self, dataobject):
newdatamodel = view.PandasTableModel(self.facade._data)
self.tblViewRaw.setModel(newdatamodel)
self.raw_data_received.emit('Data loaded')
def session_display(self):
''' Displays the Site Dialog box'''
self.dsession.show()
self.dsession.facade = self.facade
return UiMainWindow()
def test_session_dialog(qtbot, MainWindow):
MainWindow.show()
qtbot.addWidget(MainWindow)
qtbot.stopForInteraction()
|
mit
|
jeffzhengye/pylearn
|
tensorflow_learning/tf2/structured_data.py
|
1
|
3848
|
# encoding: utf-8
'''
@author: jeffzhengye
@contact: [email protected]
@file: structured_data.py
@time: 2020/12/23 11:27
origin: https://www.tensorflow.org/tutorials/structured_data/feature_columns?hl=zh-cn
@desc: Example: how to use tf.feature_column to process structured data.
'''
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import feature_column
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
from tensorflow.python.framework import dtypes
# Bare references to the feature-column APIs demonstrated below (no-ops, kept for quick lookup):
tf.feature_column.numeric_column
keras.layers.DenseFeatures
tf.feature_column.embedding_column
tf.feature_column.categorical_column_with_hash_bucket
tf.feature_column.indicator_column
tf.feature_column.bucketized_column
# URL = 'https://storage.googleapis.com/applied-dl/heart.csv'
# dataframe = pd.read_csv(URL)
data_file = 'heart.csv'
dataframe = pd.read_csv(data_file)
dataframe = dataframe.replace({'thal': {0: 'normal', 1: "fixed", 2: "normal"}})
dataframe = dataframe.astype({'thal': str})
print(dataframe.head())
train, test = train_test_split(dataframe, test_size=0.2)
train, val = train_test_split(train, test_size=0.2)
print(len(train), 'train examples')
print(len(val), 'validation examples')
print(len(test), 'test examples')
print(train.head())
# A utility method to create a tf.data dataset from a Pandas DataFrame
def df_to_dataset(dataframe, shuffle=True, batch_size=2):
dataframe = dataframe.copy()
labels = dataframe.pop('target')
ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
if shuffle:
ds = ds.shuffle(buffer_size=len(dataframe))
ds = ds.batch(batch_size)
return ds
batch_size = 3  # a small batch size, used for demonstration
train_ds = df_to_dataset(train, shuffle=False, batch_size=batch_size)
val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)
test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size)
for feature_batch, label_batch in train_ds.take(1):
print('Every feature:', list(feature_batch.keys()))
print('A batch of ages:', feature_batch['age'])
print('A batch of targets:', label_batch)
# We will use this batch of data to demonstrate several kinds of feature columns
example_batch = next(iter(train_ds))[0]
print('example_batch', example_batch)
# sparse_input = {'indices':[[0, 0], [0, 1], [1, 2]], 'values': ['fixed', 'reversible', 'normal'], 'dense_shape': [2, 4]}
sparse_input = {'indices':[[0, 0], [0, 1], [1, 2]], 'values': [1, 1, 1], 'dense_shape': [2, 4]}
input_sparse = tf.sparse.SparseTensor(**sparse_input)
# input_sparse = tf.sparse.SparseTensor(indices=[[0, 0], [0, 1], [1, 2]], values=['fixed', 'reversible', 'normal'], dense_shape=[2, 4])
# example_batch = {
# 'thal': input_sparse
# }
# A utility method to create a feature column
# and to transform a batch of data with it
def demo(feature_column):
feature_layer = layers.DenseFeatures(feature_column)
name = feature_column.name.split('_')[0]
print('input:', example_batch[name])
print(feature_layer(example_batch).numpy())
age = feature_column.numeric_column("age")
demo(age)
#
age_buckets = feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
demo(age_buckets)
#
# thal = feature_column.categorical_column_with_vocabulary_list(
# 'thal', ['fixed', 'normal', 'reversible'])
thal = feature_column.categorical_column_with_hash_bucket('thal', 20, dtype=dtypes.int32)
#
# thal_one_hot = feature_column.indicator_column(thal)
# demo(thal_one_hot)
# demo(thal)
# Note that the input to the embedding column is the categorical column we created above
thal_embedding = feature_column.embedding_column(thal, dimension=8, combiner='sum')
# demo(thal_embedding)
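# --- Added sketch (not in the original script): assemble the numeric feature columns
# defined above into a small Keras model, mirroring the linked TensorFlow tutorial.
# Only `age` and `age_buckets` are used here to keep the sketch self-contained;
# whether to also include the embedding column is left to the reader.
feature_columns_example = [age, age_buckets]
example_feature_layer = layers.DenseFeatures(feature_columns_example)
example_model = keras.Sequential([
    example_feature_layer,
    layers.Dense(128, activation='relu'),
    layers.Dense(1)
])
example_model.compile(optimizer='adam',
                      loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
                      metrics=['accuracy'])
example_model.fit(train_ds, validation_data=val_ds, epochs=1)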
|
unlicense
|
jadhavhninad/-CSE_515_MWD_Analytics-
|
Phase 3/Phase3_code/data_preprocessing.py
|
2
|
13146
|
from mysqlConn import DbConnect
from datetime import datetime
import operator
from math import exp,log
import pandas as pd
import pprint
db = DbConnect()
db_conn = db.get_connection()
cur2 = db_conn.cursor();
'''
#------------------------------------------------------
#TASK 1 : General Pre-processing
#------------------------------------------------------
#Using FEATURE SCALING for the tag newness.
#Using just normalization on the timestamp values and limiting between 0.5 and 1.
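#For illustration: a tag stamped exactly halfway between ts_min and ts_max gets ts_wt = 0.5,
#so newness_wt = 0.5 + 0.5*0.5 = 0.75; the oldest tag gets 0.5 and the newest gets 1.0.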
fmt = '%Y-%m-%d %H:%M:%S'
cur2.execute("Select timestamp from `mltags` order by timestamp asc limit 1")
result = cur2.fetchone()
ts_min = datetime.strptime(result[0], fmt)
cur2.execute("Select timestamp from `mltags` order by timestamp desc limit 1")
result = cur2.fetchone()
ts_max = datetime.strptime(result[0], fmt)
ts_diff = ts_max - ts_min
cur2.execute("Alter table `mltags` add column newness_wt varchar(20) NOT NULL")
cur2.execute("Select timestamp from `mltags`")
result = cur2.fetchall()
fmt = '%Y-%m-%d %H:%M:%S'
row_count = 0
for timedata in result:
ts_val = datetime.strptime(timedata[0], fmt)
ts_wt = (ts_val - ts_min).total_seconds() / ts_diff.total_seconds()
balanced_wt = float(0.5 + 0.5*ts_wt)
cur2.execute("UPDATE `mltags` set newness_wt = %s where timestamp = %s", (balanced_wt, timedata[0]))
if row_count >= 1000:
db_conn.commit()
row_count=0
db_conn.commit()
print "General preprocessing done."
#==========================================================
#TASK - 1 : PRE - PROCESSING
#==========================================================
#SUB -TASK 1 - Cleaning the mlmovies table. Getting single row for a single genre.
#a. Create a new table mlmovies_clean that has a single entry for a single genre.
#b. For each entry in the mlmovies create an entry in mlmovies_clean that has a unique genre entry.
cur2.execute("create table `mlmovies_clean`(movieid varchar(10) NOT NULL, moviename varchar(200) NOT NULL, year varchar(4) NOT NULL, genres varchar(200) NOT NULL)")
query1 = "SELECT * FROM `mlmovies`"
cur2.execute(query1)
result1 = cur2.fetchall()
row_count = 0
#For each tagID get the movie list
for entry in result1:
mvid = entry[0]
mvname = entry[1]
year = entry[2]
combo_genres = entry[3].split("|")
#Add new row for each genre.
for genre in combo_genres:
cur2.execute('INSERT INTO `mlmovies_clean`(movieid, moviename, year, genres) VALUES(%s, %s, %s, %s)', (mvid,mvname,year,genre))
if row_count >= 1000:
db_conn.commit()
row_count = 0
db_conn.commit()
#----------------------------------------------------------------------
#====================================================================
#TASK - 2 : Weights of movies based on year using exponential decay
#====================================================================
#Get the max year.
cur2.execute("SELECT max(year) FROM mlmovies")
max_year = int(cur2.fetchone()[0])
#add a column year_weight in the table mlmovies.
cur2.execute("Alter table `mlmovies` add column year_wt FLOAT(15) NOT NULL")
cur2.execute("SELECT year FROM `mlmovies`")
result = cur2.fetchall()
# k = decay constant. An appropriate decay constant is used so that the exponential
# values stay within a reasonable range (chosen after trying 1, 0.1, 0.01 and 0.001).
k=0.1
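#For illustration, with k = 0.1: a movie from the latest year (diff = 0) gets weight exp(0) = 1.0,
#a 10-year-old movie gets exp(-1) ~= 0.368, and a 50-year-old movie gets exp(-5) ~= 0.0067.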
for movie_year in result:
current_year = int(movie_year[0])
diff = max_year - current_year
movie_wt = float(exp(-k*diff))
cur2.execute("UPDATE `mlmovies` set year_wt = %s where year = %s",(movie_wt,movie_year[0]))
db_conn.commit()
#-------------------------------------------------------------------
print "Starting preprocessing for genre tag vectors..."
#==========================================================
#TASK - 3 : PRE - PROCESSING FOR GENRE VECTOR
#==========================================================
#Sub-TASK 2 : creating a weighted_genre_movie_count for calculating the idf value.
#Since we already have the TF value and it's data, we now generate the required data for idf.
#IDF here is based on the number of movie-genre rows associated with a certain tag, so the idf calculation is
# total movie-genre rows / sum of the weights of movie-genre rows carrying that tag
#Calculate the total weighted count for movie-genre count for each tag.
#weighted count for an occurrence of a tag = tag_newness
#Create a column weighted_movie-genre count for storing the values
####cur2.execute("Alter table `genome-tags` add column total_wt_movie_genre_count varchar(20) NULL")
#db_conn.commit()
weighted_genre_movie_count={}
cur2.execute("SELECT movieid FROM `mlmovies_clean`")
result1 = cur2.fetchall()
for data1 in result1:
#print data1
genre_movie_id = data1[0]
genre_tag_id=""
final_tag_wt=""
#Select distinct tagIDs for the movieID
cur2.execute("SELECT tagid,newness_wt FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
genre_tag_newness = data2[1]
#Get the tag_name for the tagID. For each tag weight, add the rank_weight as well.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
tagWeight = round((float(genre_tag_newness)),10)
if tagName in weighted_genre_movie_count:
weighted_genre_movie_count[tagName] = round((weighted_genre_movie_count[tagName] + tagWeight), 10)
else:
weighted_genre_movie_count[tagName] = tagWeight
#Update the total_weighted_genre_count for all the tags already there, in the same column in genome-tags
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
row_count = 0
for keyval in tagName:
key = keyval[0]
if key in weighted_genre_movie_count:
cur2.execute("UPDATE `genome-tags` set total_wt_movie_genre_count= %s where tag=%s",(weighted_genre_movie_count[key],key))
if row_count >= 1000:
db_conn.commit()
row_count = 0
db_conn.commit();
print "done"
'''
#===================================================================
#Task-4: Generate the tag vectors for each genre using tf-idf model
#===================================================================
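#--- Added sketch (not part of the original script): a toy version of the tf-idf
#--- weighting applied below, using hypothetical tag weights for a single genre.
#--- tf = tag weight / total tag weight in the genre; idf = ln(total documents / weighted doc count).
_example_tag_weights = {'violence': 1.5, 'romance': 0.75}   # hypothetical newness-weighted tag counts
_example_total_weight = sum(_example_tag_weights.values())
_example_total_documents = 100                              # hypothetical movie-genre row count
_example_doc_counts = {'violence': 20.0, 'romance': 5.0}    # hypothetical weighted doc counts per tag
_example_tf_idf = {}
for _tag, _wt in _example_tag_weights.items():
    _tf = _wt / _example_total_weight
    _idf = log(_example_total_documents / _example_doc_counts[_tag])
    _example_tf_idf[_tag] = round(_tf * _idf, 10)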
genre_vectors = {}
cur2.execute("SELECT distinct genres FROM mlmovies_clean")
resultgen = cur2.fetchall()
for genre in resultgen:
genre_vectors[genre[0]] = {}
#TF MODEL
#Subtask:1 - Get tags and Genre
#a. A dictionary to store the returned data.
data_dictionary_tf = {}
data_dictionary_tf_idf = {}
total_tag_newness_weight = 0
#Get total movie-genre count for idf calculation.
#For each tag, a document here is a row in the mlmovies_clean table (a movie-genre pair).
cur2.execute("SELECT COUNT(distinct movieid,genres) FROM mlmovies_clean")
result0 = cur2.fetchone()
total_documents = result0[0]
#Get all movies of a specific genre.
cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s",genre)
result1 = cur2.fetchall()
for data1 in result1:
#print data1
genre_movie_id = data1[0]
#Select distinct tagIDs for the movieID
cur2.execute("SELECT tagid,newness_wt FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
genre_tag_newness = data2[1]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
tagWeight = round(float(genre_tag_newness),10)
total_tag_newness_weight = total_tag_newness_weight + tagWeight
if tagName in data_dictionary_tf:
data_dictionary_tf[tagName] = round((data_dictionary_tf[tagName] + tagWeight),10)
else:
data_dictionary_tf[tagName] = tagWeight
# Set the weight of all other tags to zero.
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for keyval in tagName:
key = keyval[0]
if key in data_dictionary_tf:
data_dictionary_tf[key] = round((float(data_dictionary_tf[key]) / float(total_tag_newness_weight)),10)
else:
data_dictionary_tf[key] = 0
#genre_model_value_tf = sorted(data_dictionary_tf.items(), key=operator.itemgetter(1), reverse=True)
#IDF CALCULATION.
cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s", genre)
result3 = cur2.fetchall()
for data1 in result3:
genres_movie_id = data1[0]
# Select tagIDs for the movieID
cur2.execute("SELECT tagid FROM mltags WHERE movieid = %s", [genres_movie_id])
result4 = cur2.fetchall()
for data2 in result4:
genres_tag_id = data2[0]
# Get the tag_name for the tagID.
cur2.execute("SELECT tag,total_wt_movie_genre_count FROM `genome-tags` WHERE tagID = %s", [genres_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
tag_movie_genre_count = result2_sub[1]
if tagName in data_dictionary_tf_idf:
continue;
else:
data_dictionary_tf_idf[tagName] = float(tag_movie_genre_count)
#Once all data is recorded, calculate the idf and tfidf
#Set the weight of all other tags to zero.
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for key in tagName:
keyval = key[0]
if keyval in data_dictionary_tf_idf:
data_dictionary_tf_idf[keyval] = round((float(log((total_documents / data_dictionary_tf_idf[keyval]), 2.71828))), 10)
data_dictionary_tf_idf[keyval] = round((data_dictionary_tf[keyval] * data_dictionary_tf_idf[keyval]), 10)
else:
data_dictionary_tf_idf[keyval] = 0
genre_model_value_tf_idf = sorted(data_dictionary_tf_idf.items(), key=operator.itemgetter(0), reverse=True)
#pprint.pprint(genre_model_value_tf_idf)
#Update in the master list which will then be converted to dataframe and written to a file.
for key in genre_model_value_tf_idf:
genre_vectors[genre[0]][key[0]] = key[1]
#pprint.pprint(dd_users_movie)
genre_tag_matrix = pd.DataFrame(genre_vectors)
#genre_tag_matrix = genre_tag_matrix.T
#pprint.pprint(genre_tag_matrix)
genre_tag_matrix.to_csv("genre-tag_vectors.csv", sep='\t')
#========================================================================================
# Task 5 - Generate the movie-genre vector matrix.
#========================================================================================
#For a genre assigned to a movie, the movie-genre value is the sum of the weights of the tags
#that belong to the movie and are seen for that genre
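#For illustration: if a movie with genre g has tags t1 and t2, its movie-genre value is
#genre_tag_matrix[g].mean() + genre_vectors[g][t1] + genre_vectors[g][t2]; a movie with no tags
#falls back to the genre column mean alone.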
movie_genre_vectors={}
#Get all movies
cur2.execute("SELECT movieid FROM `mlmovies`")
result1 = cur2.fetchall()
for mv_name in result1:
#print data1
movie_genre_vectors[mv_name[0]]={}
user_movie_id = {mv_name[0],}
cur2.execute("SELECT genres FROM `mlmovies_clean` where movieid = %s", user_movie_id)
result2 = cur2.fetchall()
for gen_list in result2:
#Select all tags for the movie
cur2.execute("SELECT tagid FROM `mltags` where movieid = %s", user_movie_id)
result3 = cur2.fetchall()
#print "result3 = ",result3
for vals in result3:
#print vals
cur2.execute("SELECT tag FROM `genome-tags` where tagid = %s", vals)
tag_name = cur2.fetchone()[0]
#For a genre of a movie, add the tag weights for the genre. The sum is the value of the
#movie-genre vector.
if gen_list[0] in movie_genre_vectors[mv_name[0]]:
movie_genre_vectors[mv_name[0]][gen_list[0]] += genre_vectors[gen_list[0]][tag_name]
else:
#Add mean for the first time.
movie_genre_vectors[mv_name[0]][gen_list[0]] = genre_vectors[gen_list[0]][tag_name] + genre_tag_matrix[gen_list[0]].mean()
#Add the mean of the genre column from the genre-tag vector matrix.
#This is done to handle the case where a movie has no tags but still belongs to a genre.
if result3 == ():
#print "mean = ", genre_tag_matrix[gen_list[0]].mean()
movie_genre_vectors[mv_name[0]][gen_list[0]] = genre_tag_matrix[gen_list[0]].mean()
#Set the remaining genre values to 0
cur2.execute("SELECT distinct genres FROM `mlmovies_clean`")
result_gen = cur2.fetchall()
for gen in result_gen:
if gen[0] in movie_genre_vectors[mv_name[0]]:
#print movie_genre_vectors[mv_name[0]][gen[0]]
continue
else:
movie_genre_vectors[mv_name[0]][gen[0]] = 0
#pprint.pprint(dd_users_movie)
movie_genre_matrix = pd.DataFrame(movie_genre_vectors)
movie_genre_matrix = movie_genre_matrix.T
#pprint.pprint(genre_tag_matrix)
movie_genre_matrix.to_csv("movie_genre_matrix.csv", sep='\t')
|
gpl-3.0
|
mwmuni/LIGGGHTS_GUI
|
networkx/tests/test_convert_pandas.py
|
43
|
2177
|
from nose import SkipTest
from nose.tools import assert_true
import networkx as nx
class TestConvertPandas(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
try:
import pandas as pd
except ImportError:
raise SkipTest('Pandas not available.')
def __init__(self, ):
global pd
import pandas as pd
self.r = pd.np.random.RandomState(seed=5)
ints = self.r.random_integers(1, 10, size=(3,2))
a = ['A', 'B', 'C']
b = ['D', 'A', 'E']
df = pd.DataFrame(ints, columns=['weight', 'cost'])
df[0] = a # Column label 0 (int)
df['b'] = b # Column label 'b' (str)
self.df = df
def assert_equal(self, G1, G2):
assert_true( nx.is_isomorphic(G1, G2, edge_match=lambda x, y: x == y ))
def test_from_dataframe_all_attr(self, ):
Gtrue = nx.Graph([('E', 'C', {'cost': 9, 'weight': 10}),
('B', 'A', {'cost': 1, 'weight': 7}),
('A', 'D', {'cost': 7, 'weight': 4})])
G=nx.from_pandas_dataframe(self.df, 0, 'b', True)
self.assert_equal(G, Gtrue)
def test_from_dataframe_multi_attr(self, ):
Gtrue = nx.Graph([('E', 'C', {'cost': 9, 'weight': 10}),
('B', 'A', {'cost': 1, 'weight': 7}),
('A', 'D', {'cost': 7, 'weight': 4})])
G=nx.from_pandas_dataframe(self.df, 0, 'b', ['weight', 'cost'])
self.assert_equal(G, Gtrue)
def test_from_dataframe_one_attr(self, ):
Gtrue = nx.Graph([('E', 'C', {'weight': 10}),
('B', 'A', {'weight': 7}),
('A', 'D', {'weight': 4})])
G=nx.from_pandas_dataframe(self.df, 0, 'b', 'weight')
self.assert_equal(G, Gtrue)
def test_from_dataframe_no_attr(self, ):
Gtrue = nx.Graph([('E', 'C', {}),
('B', 'A', {}),
('A', 'D', {})])
G=nx.from_pandas_dataframe(self.df, 0, 'b',)
self.assert_equal(G, Gtrue)
|
gpl-3.0
|
h-mayorquin/time_series_basic
|
examples/one_dimensional_case.py
|
1
|
1750
|
"""
In this script we generate a one-dimensional time series with
the class, using an exponentially decaying filter mixed with
a periodic signal, and study its correlation. This was done
in order to showcase a special case for Pawell.
"""
import numpy as np
import matplotlib.pyplot as plt
from signals.signal_class import SpatioTemporalSignal
# First the parameters
dt = 0.1  # Resolution
delay = 10 # In s
Tmax = 20 # In s
# Plotting
plot = True
verbose = False
# Let's define the object
A = SpatioTemporalSignal(dt=dt, delay=delay, Tmax=Tmax, Nseries=2)
time = A.time
####
# Filter construction
####
# First the time
filter_time = np.arange(A.Ndelay) * dt
# Now the filter
tau = 1
Amp = -100.0
decay = np.exp(-filter_time / tau)
periodic = Amp * np.cos(filter_time)
alpha = decay * periodic
alpha_to_plot = alpha
alpha_to_plot = periodic
# alpha_to_plot = decay * A.Ndelay
decay = np.exp(-time / tau)
periodic = Amp * np.cos(time)
alpha = decay * periodic
# alpha = decay * A.Ndelay
# alpha = periodic
# The rest of the interaction terms
b = np.zeros(A.NTmax)
c = np.zeros(A.NTmax)
d = np.zeros(A.NTmax)
interaction = np.array(((alpha, b), (c, d)))
###
# Complete the series and extract it
###
initial_conditions = np.array((1, 0))
A.set_initial_conditions(initial_conditions)
A.set_interaction(interaction)
if verbose:
A.construct_series_verbose()
else:
A.construct_series()
result = A.series[0, :]
correlation = np.correlate(result, result, mode='same')
correlation_to_plot = correlation[correlation.size // 2:]
if plot:
plt.subplot(3, 1, 1)
plt.plot(time, result)
plt.subplot(3, 1, 2)
plt.plot(filter_time, alpha_to_plot)
plt.subplot(3, 1, 3)
plt.plot(correlation_to_plot)
plt.show()
|
bsd-3-clause
|
tommiseppanen/visualizations
|
tyre-model/scatter.py
|
1
|
3882
|
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import math
class FrictionCurvePoint:
def __init__(self, value, slip):
self.value = value
self.slip = slip
class FrictionCurve:
def __init__(self, extremum_value, asymptote_value, extremum_slip, asymptote_slip):
self.extremum = FrictionCurvePoint(extremum_value, extremum_slip)
self.asymptote = FrictionCurvePoint(asymptote_value, asymptote_slip)
def calculate_coefficient(self, value):
absolute_value = abs(value)
if absolute_value <= self.extremum.slip:
return (absolute_value / self.extremum.slip) * self.extremum.value
elif self.extremum.slip < absolute_value < self.asymptote.slip:
return ((self.asymptote.value - self.extremum.value) / (self.asymptote.slip - self.extremum.slip)) \
* (absolute_value - self.extremum.slip) + self.extremum.value
return self.asymptote.value
def coefficient(long_slip_value, lat_slip_value, longitudinal_curve, lateral_curve):
combined_slip = np.sqrt(lat_slip_value ** 2 + long_slip_value ** 2)
if combined_slip == 0:
return 0
if long_slip_value != 0:
combined_slip_curve = create_combined_curve(long_slip_value, lat_slip_value, longitudinal_curve, lateral_curve)
return combined_slip_curve.calculate_coefficient(combined_slip)
return lateral_curve.calculate_coefficient(lat_slip_value)
def create_combined_curve(long_slip_value, lat_slip_value, longitudinal_curve, lateral_curve):
absolute_long_slip_value = abs(long_slip_value)
absolute_lat_slip_value = abs(lat_slip_value)
gradient = absolute_lat_slip_value / absolute_long_slip_value
slip_values = (absolute_long_slip_value, absolute_lat_slip_value)
extremum_value, asymptote_value = interpolate_combined_values(slip_values, longitudinal_curve, lateral_curve)
limit_extremum = calculate_limit(gradient, longitudinal_curve.extremum.slip, lateral_curve.extremum.slip)
limit_asymptote = calculate_limit(gradient, longitudinal_curve.asymptote.slip, lateral_curve.asymptote.slip)
return FrictionCurve(extremum_value, asymptote_value, limit_extremum, limit_asymptote)
def interpolate_combined_values(slip_values, longitudinal_curve, lateral_curve):
return interpolate_by_angle(slip_values, longitudinal_curve.extremum, lateral_curve.extremum), \
interpolate_by_angle(slip_values, longitudinal_curve.asymptote, lateral_curve.asymptote)
def interpolate_by_angle(slip_values, longitudinal_curve_point, lateral_curve_point):
long_slip_value, lat_slip_value = slip_values
angle = math.atan2(lat_slip_value,
(lateral_curve_point.slip / longitudinal_curve_point.slip) * long_slip_value)
return longitudinal_curve_point.value + \
((lateral_curve_point.value - longitudinal_curve_point.value) / (
math.pi / 2)) * angle
def calculate_limit(gradient, longitudinal_slip, lateral_slip):
limit_x = (longitudinal_slip * lateral_slip) / np.sqrt(lateral_slip ** 2 + longitudinal_slip ** 2 * gradient ** 2)
limit_y = gradient * limit_x
return np.sqrt(limit_x ** 2 + limit_y ** 2)
fig = plt.figure()
ax = fig.gca(projection='3d')
X = np.arange(0.0, 0.9, 0.01)
Y = np.arange(0, 90, 1)
xs = np.zeros(len(X) * len(Y))
ys = np.zeros(len(X) * len(Y))
zs = np.zeros(len(X) * len(Y))
c = ["" for x in range(len(X) * len(Y))]
Z = np.zeros((len(X), len(Y)))
for x in range(len(X)):
for y in range(len(Y)):
xs[x * len(Y) + y] = X[x]
ys[x * len(Y) + y] = Y[y]
value = coefficient(X[x], Y[y], FrictionCurve(2 * 1.0, 2 * 0.75, 0.4, 0.8), FrictionCurve(1.0, 0.75, 20, 40))
zs[x * len(Y) + y] = value
c[x * len(Y) + y] = 'b' if value <= 0.75 else 'r'
ax.scatter(xs, ys, zs, s=1, c=c)
plt.show()
|
mit
|
velocyno/glances
|
setup.py
|
1
|
3412
|
#!/usr/bin/env python
import glob
import sys
import re
from setuptools import setup, Command
# Global functions
##################
def get_version():
"""Get version inside the __init__.py file"""
init_file = open("glances/__init__.py").read()
reg_version = r"^__version__ = ['\"]([^'\"]*)['\"]"
find_version = re.search(reg_version, init_file, re.M)
if find_version:
return find_version.group(1)
else:
print("Can not retreive Glances version in the glances/__init__.py file.")
sys.exit(1)
def get_data_files():
data_files = [
('share/doc/glances', ['AUTHORS', 'COPYING', 'NEWS', 'README.rst',
'conf/glances.conf']),
('share/man/man1', ['docs/man/glances.1'])
]
return data_files
def get_requires():
requires = ['psutil>=2.0.0']
return requires
class tests(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import subprocess
import sys
for t in glob.glob('unitest.py'):
ret = subprocess.call([sys.executable, t])
if ret != 0:
raise SystemExit(ret)
raise SystemExit(0)
# Global vars
#############
glances_version = get_version()
if sys.version_info < (2, 7) or (3, 0) <= sys.version_info < (3, 3):
print('Glances {0} requires at least Python 2.7 or 3.3 to run.'.format(glances_version))
print('Please install Glances 2.6.2 on your system.')
sys.exit(1)
# Setup !
setup(
name='Glances',
version=glances_version,
description="A cross-platform curses-based monitoring tool",
long_description=open('README.rst').read(),
author='Nicolas Hennion',
author_email='[email protected]',
url='https://github.com/nicolargo/glances',
license="LGPL",
keywords="cli curses monitoring system",
install_requires=get_requires(),
extras_require={
'WEB': ['bottle', 'requests'],
'SENSORS': ['py3sensors'],
'BATINFO': ['batinfo'],
'SNMP': ['pysnmp'],
'CHART': ['matplotlib'],
'BROWSER': ['zeroconf>=0.17'],
'IP': ['netifaces'],
'RAID': ['pymdstat'],
'DOCKER': ['docker-py'],
'EXPORT': ['influxdb>=1.0.0', 'elasticsearch', 'potsdb', 'statsd', 'pika', 'bernhard', 'cassandra-driver'],
'ACTION': ['pystache'],
'CPUINFO': ['py-cpuinfo'],
'FOLDERS': ['scandir']
},
packages=['glances'],
include_package_data=True,
data_files=get_data_files(),
cmdclass={'test': tests},
test_suite="unitest.py",
entry_points={"console_scripts": ["glances=glances:main"]},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console :: Curses',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
]
)
|
lgpl-3.0
|
terkkila/scikit-learn
|
examples/text/mlcomp_sparse_document_classification.py
|
292
|
4498
|
"""
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from the http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
|
bsd-3-clause
|
simpeg/discretize
|
examples/plot_slicer_demo.py
|
1
|
6526
|
"""
Slicer demo
===========
The example demonstrates the `plot_3d_slicer`
- contributed by `@prisae <https://github.com/prisae>`_
Using the inversion result from the example notebook
`plot_laguna_del_maule_inversion.ipynb <http://docs.simpeg.xyz/content/examples/20-published/plot_laguna_del_maule_inversion.html>`_
In the notebook, you have to use :code:`%matplotlib notebook`.
"""
# %matplotlib notebook
import os
import discretize
import numpy as np
import tarfile
import matplotlib.pyplot as plt
from matplotlib.colors import SymLogNorm
###############################################################################
# Download and load data
# ----------------------
#
# In the following we load the :code:`mesh` and :code:`Lpout` that you would
# get from running the laguna-del-maule inversion notebook.
f = discretize.utils.download(
"https://storage.googleapis.com/simpeg/laguna_del_maule_slicer.tar.gz",
overwrite=True,
)
tar = tarfile.open(f, "r")
tar.extractall()
tar.close()
# Load the mesh and model
mesh = discretize.load_mesh(os.path.join("laguna_del_maule_slicer", "mesh.json"))
Lpout = np.load(os.path.join("laguna_del_maule_slicer", "Lpout.npy"))
###############################################################################
# Case 1: Using the intrinsic functionality
# ------------------------------------------
#
# 1.1 Default options
# ^^^^^^^^^^^^^^^^^^^
mesh.plot_3d_slicer(Lpout)
###############################################################################
# 1.2 Create a function to improve plots, labeling after creation
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Depending on your data the default options might look a bit odd. The look
# of the figure can be improved by getting its handle and adjusting it.
def beautify(title, fig=None):
"""Beautify the 3D Slicer result."""
# Get figure handle if not provided
if fig is None:
fig = plt.gcf()
# Get principal figure axes
axs = fig.get_children()
# Set figure title
fig.suptitle(title, y=0.95, va="center")
# Adjust the y-labels on the first subplot (XY)
plt.setp(axs[1].yaxis.get_majorticklabels(), rotation=90)
for label in axs[1].yaxis.get_ticklabels():
label.set_visible(False)
for label in axs[1].yaxis.get_ticklabels()[::3]:
label.set_visible(True)
axs[1].set_ylabel("Northing (m)")
# Adjust x- and y-labels on the second subplot (XZ)
axs[2].set_xticks([357500, 362500, 367500])
axs[2].set_xlabel("Easting (m)")
plt.setp(axs[2].yaxis.get_majorticklabels(), rotation=90)
axs[2].set_yticks([2500, 0, -2500, -5000])
axs[2].set_yticklabels(["$2.5$", "0.0", "-2.5", "-5.0"])
axs[2].set_ylabel("Elevation (km)")
# Adjust x-labels on the third subplot (ZY)
axs[3].set_xticks([2500, 0, -2500, -5000])
axs[3].set_xticklabels(["", "0.0", "-2.5", "-5.0"])
# Adjust colorbar
axs[4].set_ylabel("Density (g/cc$^3$)")
# Ensure sufficient margins so nothing is clipped
plt.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.9)
###############################################################################
#
mesh.plot_3d_slicer(Lpout)
beautify("mesh.plot_3d_slicer(Lpout)")
###############################################################################
# 1.3 Set `xslice`, `yslice`, and `zslice`; transparent region
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The 2nd-4th input arguments are the initial x-, y-, and z-slice location
# (they default to the middle of the volume). The transparency-parameter can
# be used to define transparent regions.
mesh.plot_3d_slicer(Lpout, 370000, 6002500, -2500, transparent=[[-0.02, 0.1]])
beautify(
"mesh.plot_3d_slicer("
"\nLpout, 370000, 6002500, -2500, transparent=[[-0.02, 0.1]])"
)
###############################################################################
# 1.4 Set `clim`, use `pcolor_opts` to show grid lines
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
mesh.plot_3d_slicer(
Lpout, clim=[-0.4, 0.2], pcolor_opts={"edgecolor": "k", "linewidth": 0.1}
)
beautify(
"mesh.plot_3d_slicer(\nLpout, clim=[-0.4, 0.2], "
"pcolor_opts={'edgecolor': 'k', 'linewidth': 0.1})"
)
###############################################################################
# 1.5 Use `pcolor_opts` to set `SymLogNorm`, and another `cmap`
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
mesh.plot_3d_slicer(
Lpout, pcolor_opts={"norm": SymLogNorm(linthresh=0.01), "cmap": "RdBu_r"}
)
beautify(
"mesh.plot_3d_slicer(Lpout,"
"\npcolor_opts={'norm': SymLogNorm(linthresh=0.01),'cmap': 'RdBu_r'})`"
)
###############################################################################
# 1.6 Use :code:`aspect` and :code:`grid`
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# By default, :code:`aspect='auto'` and :code:`grid=[2, 2, 1]`. This means that
# the figure is on a 3x3 grid, where the `xy`-slice occupies 2x2 cells of the
# subplot-grid, `xz`-slice 2x1, and the `zy`-slice 1x2. So the
# :code:`grid=[x, y, z]`-parameter takes the number of cells for `x`, `y`, and
# `z`-dimension.
#
# :code:`grid` can be used to improve the likely weird subplot arrangement
# if :code:`aspect` is anything other than :code:`auto`. However, if you zoom
# then it won't help. Expect the unexpected.
mesh.plot_3d_slicer(Lpout, aspect=["equal", 1.5], grid=[4, 4, 3])
beautify("mesh.plot_3d_slicer(Lpout, aspect=['equal', 1.5], grid=[4, 4, 3])")
###############################################################################
# 1.7 Transparency-slider
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# Setting the transparent-parameter to 'slider' will create interactive sliders
# to change which range of values of the data is visible.
mesh.plot_3d_slicer(Lpout, transparent="slider")
beautify("mesh.plot_3d_slicer(Lpout, transparent='slider')")
###############################################################################
# Case 2: Just using the Slicer class
# ------------------------------------------
#
# This way you get the figure-handle, and can do further stuff with the figure.
# You have to initialize a figure
fig = plt.figure()
# Then you have to get the tracker from the Slicer
tracker = discretize.mixins.Slicer(mesh, Lpout)
# Finally you have to connect the tracker to the figure
fig.canvas.mpl_connect("scroll_event", tracker.onscroll)
# Run it through beautify
beautify("'discretize.mixins.Slicer' together with\n'fig.canvas.mpl_connect'", fig)
plt.show()
|
mit
|
weld-project/weld
|
examples/python/grizzly/get_population_stats_grizzly.py
|
3
|
1373
|
#!/usr/bin/python
# The usual preamble
import numpy as np
import grizzly.numpy_weld as npw
import pandas as pd
import grizzly.grizzly as gr
import time
# Get data (US cities/states/counties population dataset) and start cleanup
raw_data = pd.read_csv('data/us_cities_states_counties.csv', delimiter='|')
raw_data.dropna(inplace=True)
data = gr.DataFrameWeld(raw_data)
print "Done reading input file..."
start = time.time()
# Get all city information with total population greater than 500,000
data_big_cities = data[data["Total population"] > 500000]
data_big_cities_new_df = data_big_cities[["State short"]]
# Compute "crime index" proportional to
# exp((Total population + 2*(Total adult population) - 2000*(Number of
# robberies)) / 100000)
data_big_cities_stats = data_big_cities[
["Total population", "Total adult population", "Number of robberies"]].values
predictions = npw.exp(npw.dot(data_big_cities_stats, np.array(
[1, 2, -2000], dtype=np.int64)) / 100000.0)
predictions = predictions / predictions.sum()
data_big_cities_new_df["Crime index"] = predictions
# Aggregate "crime index" scores by state
data_big_cities_grouped_df = data_big_cities_new_df.groupby(
"State short").sum()
print sorted(["%.4f" % ele for ele in data_big_cities_grouped_df.evaluate().to_pandas()["Crime index"]])
end = time.time()
print "Total end-to-end time: %.2f" % (end - start)
|
bsd-3-clause
|
zhongyuanzhou/FCH808.github.io
|
Data Visualization/Project/wrangle/nobel_scrape.py
|
2
|
3364
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 30 17:31:07 2015
@author: fch
"""
import requests
import json
import prettytable
import csv
import codecs
import pandas as pd
import sys
from bs4 import BeautifulSoup
from latlon import *
from wrangle import *
# import os
# os.chdir(os.path.join(os.path.expanduser('~FCH80_000'), "Temp2\FCH808.github.io\Data Visualization\Project"))
reload(sys)
sys.setdefaultencoding("utf-8")
url = 'http://www.nobelprize.org/nobel_prizes/lists/universities.html'
r = requests.get(url)
soup = BeautifulSoup(r.text, from_encoding=r.encoding)
acquired_html = soup.find_all(name="div", attrs={"class": "by_year"})
nobel_acquired = find_country_acq(acquired_html)
###############################################################################
#acq_lat_lon = create_lat_lon(nobel_acquired, country_type='acquired',
# country_col='current_country_name_acquired',
# city_col='city', state_col='state')
###############################################################################
nobel_acquired2 = pd.merge(nobel_acquired, acq_lat_lon)
url2 = 'http://www.nobelprize.org/nobel_prizes/lists/age.html'
r2 = requests.get(url2)
soup2 = BeautifulSoup(r2.text)
age_html = soup2.find_all(name="div", attrs={"class": "large-12 columns"})
nobel_ages = find_age(age_html)
url3 = 'http://www.nobelprize.org/nobel_prizes/lists/countries.html'
r3 = requests.get(url3)
soup3 = BeautifulSoup(r3.text)
birth_html = soup3.find_all(name="div", attrs={"class": "by_year"})
nobel_birth = find_country_birth(birth_html)
##############################################################################
#birth_lat_lon = create_lat_lon(nobel_birth, country_type='birth',
# country_col='birth_country_current_name')
##############################################################################
nobel_birth2 = pd.merge(nobel_birth, birth_lat_lon)
del nobel_birth2['city']
del nobel_birth2['state']
sorted1 = nobel_birth2.sort(columns=['name', 'year']).reset_index(drop=True)
sorted2 = nobel_ages.sort(columns=['name', 'age']).reset_index(drop=True)
merged = pd.merge(sorted1, sorted2, left_index=True, right_index=True, how='outer', on='name')
# merged[merged.name=="Marie Curie"]
all_acquired = pd.merge(nobel_acquired2, merged, left_index=True,
how='inner', on=['name', 'year', 'field'])
##############################################################################
#all_acquired.to_csv('data/all_acquired.csv', encoding='utf-8')
nobel_peace = merged[merged['field'] == 'Peace']
#nobel_peace.to_csv('data/nobel_peace.csv', encoding='utf-8')
##############################################################################
#headers = country_acquired.pop(0)
#df = pd.DataFrame(country_acquired, columns=headers)
#df.head()
#countries = list(set(df.birth_country_new_name))
# url2 = lookup_lat_lon(country=countries[38], key=google_api_key)
# r2 = requests.get(url2)
# country_json = r2.json()
# Get the lat/lon from the Google API!
#lat_lon_birth_countries = get_long_lat(countries, birth_countries=True)
#headers = lat_lon_birth_countries.pop(0)
#birth_countries_df = pd.DataFrame(lat_lon_birth_countries, columns=headers)
#birth_countries_df.head()
|
mit
|
RomainBrault/scikit-learn
|
examples/applications/plot_out_of_core_classification.py
|
51
|
13651
|
"""
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
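# --- Added sketch (not part of the original example): the core out-of-core pattern the
# docstring describes, on a couple of hypothetical toy batches. Text is hashed into a
# fixed-size feature space so the model can be updated batch by batch with ``partial_fit``
# without ever holding the full corpus in memory.
_sketch_vectorizer = HashingVectorizer(n_features=2 ** 10)
_sketch_clf = SGDClassifier()
_sketch_batches = [(["merger agreed", "quarterly loss reported"], [1, 0]),
                   (["company acquired rival", "weather was mild"], [1, 0])]
for _texts, _labels in _sketch_batches:
    _sketch_clf.partial_fit(_sketch_vectorizer.transform(_texts), _labels,
                            classes=[0, 1])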
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
# --------------------------------
#
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
# ----
#
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
# ------------
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = ['b', 'g', 'r', 'c', 'm', 'y']
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
|
bsd-3-clause
|
LiaoPan/scikit-learn
|
sklearn/ensemble/forest.py
|
8
|
62431
|
"""Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
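A minimal usage sketch (illustrative, not part of the original docstring)::

    from sklearn.ensemble import RandomForestClassifier
    clf = RandomForestClassifier(n_estimators=10, random_state=0)
    clf.fit([[0, 0], [1, 1]], [0, 1])
    clf.predict([[0.8, 0.8]])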
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score fuction."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
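# Note (illustrative, added for clarity; not part of the original module): the
# bootstrap above is applied without materialising a resampled copy of (X, y).
# Each tree is fit on the full data with per-sample weights multiplied by the
# number of times each sample appears in that tree's bootstrap draw, e.g.::
#     indices = _generate_sample_indices(0, n_samples)  # draw with replacement
#     counts = bincount(indices, minlength=n_samples)   # 0 for out-of-bag rows
#     # weighting by ``counts`` is equivalent to fitting on the resampled data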
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
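        A hedged sketch for a fitted forest ``clf`` (assumed for illustration)::
            leaves = clf.apply(X)
            # leaves[i, j] is the index of the leaf that sample i reaches in
            # tree j; leaves.shape == (n_samples, n_estimators)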
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity, unlike
            # [:, np.newaxis] which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
            raise ValueError('n_estimators=%d must be larger than or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
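# Illustrative note (added for clarity; not part of the original module): for a
# fitted forest ``clf``, ``feature_importances_`` is simply the per-tree
# importances averaged over ``clf.estimators_``::
#     manual = sum(t.feature_importances_ for t in clf.estimators_) / len(clf.estimators_)
#     # np.allclose(manual, clf.feature_importances_) should hold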
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
            valid_presets = ('auto', 'balanced', 'balanced_subsample', 'subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated and will be removed in 0.18."
" It was replaced by class_weight='balanced_subsample' "
"using the balanced strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
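        A hedged sketch of the relation to ``predict_proba`` for a fitted
        single-output classifier ``clf`` (assumed for illustration)::
            import numpy as np
            proba = clf.predict_proba(X)
            manual = clf.classes_.take(np.argmax(proba, axis=1), axis=0)
            # np.array_equal(manual, clf.predict(X)) should hold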
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
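        A hedged sketch for a fitted single-output classifier ``clf``
        (assumed for illustration)::
            proba = clf.predict_proba(X)
            # proba has shape (n_samples, n_classes); each row is the average
            # of the per-tree class probability estimates and sums to 1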
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
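        A hedged sketch of the averaging for a fitted single-output regressor
        ``reg`` (assumed for illustration)::
            import numpy as np
            manual = np.mean([t.predict(X) for t in reg.estimators_], axis=0)
            # np.allclose(manual, reg.predict(X)) should hold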
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
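    Examples
    --------
    A minimal, illustrative sketch; the toy data below is assumed purely for
    demonstration::
        from sklearn.ensemble import RandomForestClassifier
        X = [[0, 0], [1, 1], [0, 1], [1, 0]]
        y = [0, 1, 1, 0]
        clf = RandomForestClassifier(n_estimators=5, random_state=0).fit(X, y)
        print(clf.predict([[1, 1]]))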
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
    A random forest is a meta estimator that fits a number of decision tree
    regressors on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
        Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
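    Examples
    --------
    A minimal, illustrative sketch; the toy data below is assumed purely for
    demonstration::
        from sklearn.ensemble import RandomForestRegressor
        X = [[0.0], [1.0], [2.0], [3.0]]
        y = [0.0, 1.0, 2.0, 3.0]
        reg = RandomForestRegressor(n_estimators=5, random_state=0).fit(X, y)
        print(reg.predict([[1.5]]))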
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
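    Examples
    --------
    A minimal, illustrative sketch; the toy data below is assumed purely for
    demonstration::
        from sklearn.ensemble import ExtraTreesClassifier
        X = [[0, 0], [1, 1], [0, 1], [1, 0]]
        y = [0, 1, 1, 0]
        clf = ExtraTreesClassifier(n_estimators=5, random_state=0).fit(X, y)
        print(clf.predict_proba([[0, 0]]))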
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
    sklearn.tree.ExtraTreeRegressor : Base estimator for this ensemble.
    RandomForestRegressor : Ensemble regressor using trees with optimal splits.
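    Examples
    --------
    A minimal, illustrative sketch; the toy data below is assumed purely for
    demonstration::
        from sklearn.ensemble import ExtraTreesRegressor
        X = [[0.0], [1.0], [2.0], [3.0]]
        y = [0.0, 1.0, 2.0, 3.0]
        reg = ExtraTreesRegressor(n_estimators=5, random_state=0).fit(X, y)
        print(reg.predict([[2.5]]))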
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
        Whether or not to return a sparse CSR matrix (the default behavior),
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
    estimators_ : list of ExtraTreeRegressor
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
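    Examples
    --------
    A minimal, illustrative sketch; the toy data below is assumed purely for
    demonstration::
        from sklearn.ensemble import RandomTreesEmbedding
        X = [[0, 0], [1, 1], [0, 1], [1, 0]]
        embedder = RandomTreesEmbedding(n_estimators=3, random_state=0)
        X_sparse = embedder.fit_transform(X)
        print(X_sparse.shape)  # (4, n_out), a sparse one-hot leaf encoding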
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
        # ensure_2d=False because there are actually unit tests checking that
        # we fail for 1d.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
|
bsd-3-clause
|
nicholasmalaya/arcanus
|
exp/oriface/measurement/code/cd.py
|
2
|
4185
|
#!/usr/bin/env python
#
# open file
# read contents
# (re)start when third column found
#
import sys
def read_set(path):
#
# data
#
set_names = []
orm = []
lfe = []
file = open(path, "r+")
for line in file:
#
# sep by whitespace
#
line_list = line.split()
set_name=line_list[3:]
set_names.append(' '.join(set_name))
orm.append(line_list[1])
lfe.append(line_list[2])
#
# clean up
#
file.close()
#
# exit
#
return set_names,orm,lfe
if __name__ == "__main__":
import numpy as np
# -------------------------------------------------------------------------------
# open and read file
# -------------------------------------------------------------------------------
path1="../data/series1.lvm"
path2="../data/series2.lvm"
s1,o1,l1 = read_set(path1)
s2,o2,l2 = read_set(path2)
# -------------------------------------------------------------------------------
# Calculate Qs
# -------------------------------------------------------------------------------
rho = 0.074887
mu = 3.8364971e-7
nu = mu/rho
d = 1.8227
D = 4.0
beta = d/D
    print('beta is', beta)
    l1 = np.array(list(map(float, o1)))
    l2 = np.array(list(map(float, o2)))
q1 = 20.5*1.004*l1
q2 = 20.5*1.004*l2
re1 = q1 * D /(np.pi*nu*(d**2.0)/4.0)
re2 = q2 * D /(np.pi*nu*(d**2.0)/4.0)
cd1 = q1 * ((1-beta**4)**(0.5) /(np.pi*(d**2.0)/4.0)) * (rho/(2*l1))**0.5
cd2 = q2 * ((1-beta**4)**(0.5) /(np.pi*(d**2.0)/4.0)) * (rho/(2*l2))**0.5
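    # Clarifying note (added; not part of the original script): cd1/cd2 above
    # follow the standard orifice-meter relation
    #     Cd = Q * sqrt(1 - beta**4) / A_o * sqrt(rho / (2 * dP))
    # with A_o = pi*d**2/4 the orifice area and dP the differential reading
    # stored in l1/l2; re1/re2 are the Reynolds numbers used as the abscissa.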
# -------------------------------------------------------------------------------
# least squares curve fit
# -------------------------------------------------------------------------------
# import numpy as np
# from scipy import stats
# height = [float(i) for i in height]
# voltage = [float(i) for i in voltage]
# (slope, intercept, r_value, p_value, std_err) = stats.linregress(height,voltage)
# print "r-squared:", r_value**2
# print 'p_value', p_value
# print 'slope: ', slope
# -------------------------------------------------------------------------------
# plot it!
# -------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.subplot(1, 1, 1)
plt.plot(re1, cd1, 'ko-',label='First Calibration Set',color='blue')
plt.plot(re2, cd2, 'ko--',label='Second Calibration Set',color='black')
    plt.title('Calibration of an Orifice Meter')
plt.ylabel(r'$C_d$')
plt.xlabel('Re')
plt.legend(loc='best')
plt.savefig('cd.png')
plt.close()
#
# steady as she goes
#
sys.exit(0)
# -------------------------------------------------------------------------------
# nick
# 9/29/15
# -------------------------------------------------------------------------------
# LabVIEW Measurement
# Writer_Version 2
# Reader_Version 2
# Separator Tab
# Decimal_Separator .
# Multi_Headings No
# X_Columns One
# Time_Pref Relative
# Operator Methods Students
# Description Don't delete initial header
# Date 2015/09/23
# Time 15:07:04.9949688911437988282
# ***End_of_Header***
# Channels 2
# Samples 1 1
# Date 2015/09/23 2015/09/23
# Time 15:07:04.9949688911437988282 15:07:04.9949688911437988282
# X_Dimension Time Time
# X0 0.0000000000000000E+0 0.0000000000000000E+0
# Delta_X 1.000000 1.000000
# ***End_of_Header***
# X_Value Untitled Untitled 1 Comment
#
# set 2--
# LabVIEW Measurement
# Writer_Version 2
# Reader_Version 2
# Separator Tab
# Decimal_Separator .
# Multi_Headings No
# X_Columns One
# Time_Pref Relative
# Operator Methods Students
# Description Don't delete initial header
# Date 2015/09/23
# Time 14:23:21.3099026679992675781
# ***End_of_Header***
# Channels 2
# Samples 1 1
# Date 1903/12/31 1903/12/31
# Time 18:00:00 18:00:00
# X_Dimension Time Time
# X0 0.0000000000000000E+0 0.0000000000000000E+0
# Delta_X 1.000000 1.000000
# ***End_of_Header***
# X_Value Untitled Untitled 1 Comment
|
mit
|
Gamebasis/3DGamebasisServer
|
GameData/blender-2.71-windows64/2.71/python/lib/site-packages/numpy/lib/recfunctions.py
|
17
|
35016
|
"""
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for matplotlib.
They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
if sys.version_info[0] < 3:
from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
__all__ = ['append_fields',
'drop_fields',
'find_duplicates',
'get_fieldstructure',
'join_by',
'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields',
'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
    Returns the field names of the input datatype as a tuple. Nested structures
    are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
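    Examples
    --------
    A small illustrative sketch (added; the exact byte-order/itemsize strings,
    e.g. '<i4' vs '<i8', depend on the platform)::
        a = np.zeros(3, dtype=[('A', int)])
        b = np.zeros(3, dtype=[('B', float)])
        zip_descr((a, b), flatten=True)
        # -> [('A', '<i8'), ('B', '<f8')] on a 64-bit platform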
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields as keys and a list of parent fields as values.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
        Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
# if (lastparent[-1] != lastname):
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if hasattr(element, '__iter__') and not isinstance(element, basestring):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
    seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
    flatten : {True, False}, optional
        Whether to collapse nested fields.
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in zip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
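# Illustrative sketch (not part of the upstream module): izip_records zipping two
# plain arrays of different lengths, padding the shorter one with fill_value.
# The helper name is hypothetical.
def _example_izip_records():
    a = np.array([1, 2, 3])
    b = np.array([10., 20.])
    # Roughly: [(1, 10.0), (2, 20.0), (3, None)]
    return list(izip_records((a, b), fill_value=None, flatten=True))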
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays,
fill_value= -1, flatten=False, usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
    * Without a mask, the missing value will be filled with something
      depending on its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
            # Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
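# Illustrative sketch (not part of the upstream module): the docstring above does
# not show the effect of ``flatten=True``, so this hypothetical helper merges an
# array with a nested field and collapses the nesting in the result.
def _example_merge_arrays_flatten():
    a = np.array([(1, (2.0, 3)), (4, (5.0, 6))],
                 dtype=[('A', int), ('B', [('BA', float), ('BB', int)])])
    b = np.array([10, 20])
    # The result should expose the flat fields 'A', 'BA', 'BB' plus the unnamed
    # field of ``b`` (auto-named 'f3').
    return merge_arrays((a, b), flatten=True, usemask=False)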
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the fields
to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
    asrecarray : {False, True}, optional
        Whether to return a recarray or a mrecarray (`asrecarray=True`) or
        a plain ndarray or masked array with flexible dtype (`asrecarray=False`).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
#
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
#
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
#
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append((newname,
_recursive_rename_fields(current, namemapper)))
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value= -1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else :
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
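# Illustrative sketch (not part of the upstream module), since the docstring above
# has no Examples section: appending a single new field to a small structured
# array. The helper name is hypothetical.
def _example_append_fields():
    base = np.array([(1, 10.0), (2, 20.0), (3, 30.0)],
                    dtype=[('a', int), ('b', float)])
    # Expected result: a structured array with fields 'a', 'b' and the new 'c'.
    return append_fields(base, 'c', data=[1.5, 2.5, 3.5], usemask=False)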
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
    Superposes arrays, field by field.
Parameters
----------
    arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
autoconvert : {False, True}, optional
        Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" % \
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
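# Illustrative sketch (not part of the upstream module): exercising find_duplicates
# on a small masked array and also retrieving the indices of the duplicated
# entries. The helper name is hypothetical.
def _example_find_duplicates():
    a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
                    mask=[0, 0, 1, 0, 0, 0, 1]).view([('a', int)])
    # With ignoremask=True, masked entries are not counted as duplicates.
    dups, idx = find_duplicates(a, ignoremask=True, return_index=True)
    return dups, idx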
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of string corresponding
to the fields used to join the array.
An exception is raised if the `key` field cannot be found in the two input
arrays.
Neither `r1` nor `r2` should have any duplicates along `key`: the presence
of duplicates will make the output quite unreliable. Note that duplicates
are not looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
        If 'outer', returns the common elements as well as the elements of r1
        not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1 not
in r2.
    r1postfix : string, optional
        String appended to the names of the fields of r1 that are present
        in r2 but absent from the key.
    r2postfix : string, optional
        String appended to the names of the fields of r2 that are present
        in r1 but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for the
two arrays and concatenating the result. This array is then sorted, and
the common entries selected. The output is constructed by filling the fields
with the selected entries. Matching is not preserved if there are some
duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError("The 'jointype' argument should be in 'inner', "\
"'outer' or 'leftouter' (got '%s' instead)" % jointype)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
(nb1, nb2) = (len(r1), len(r2))
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
if (set.intersection(set(r1names), set(r2names)).difference(key) and
not (r1postfix or r2postfix)):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = ndtype.index(desc)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
names.extend(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest nb of common fields : r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and not f in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
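# Illustrative sketch (not part of the upstream module), since the docstring above
# has no Examples section: an inner join of two small structured arrays on a
# shared 'key' field. The helper name is hypothetical.
def _example_join_by():
    r1 = np.array([(1, 10.0), (2, 20.0)], dtype=[('key', int), ('v1', float)])
    r2 = np.array([(1, 100.0), (3, 300.0)], dtype=[('key', int), ('v2', float)])
    # Only key == 1 appears in both inputs, so the inner join has a single row
    # with fields 'key', 'v1' and 'v2'.
    return join_by('key', r1, r2, jointype='inner', usemask=False)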
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
|
gpl-3.0
|
karenyyng/george
|
docs/_code/hyper_sample_results.py
|
4
|
1176
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A reproduction of Figure 5.6 from Rasmussen & Williams (2006).
http://www.gaussianprocess.org/gpml/
"""
from __future__ import division, print_function
import sys
import numpy as np
import cPickle as pickle
import statsmodels.api as sm
import matplotlib.pyplot as pl
# Load the dataset.
data = sm.datasets.get_rdataset("co2").data
t = np.array(data.time)
y = np.array(data.co2)
# Load the results.
chain, _, gp = pickle.load(open(sys.argv[1], "rb"))
# Set up the figure.
fig = pl.figure(figsize=(6, 3.5))
ax = fig.add_subplot(111)
ax.plot(t, y, ".k", ms=2)
ax.set_xlabel("year")
ax.set_ylabel("CO$_2$ in ppm")
fig.subplots_adjust(left=0.15, bottom=0.2, right=0.99, top=0.95)
# Plot the predictions.
x = np.linspace(max(t), 2025, 250)
for i in range(50):
# Choose a random walker and step.
w = np.random.randint(chain.shape[0])
n = np.random.randint(2000, chain.shape[1])
gp.kernel.pars = np.exp(chain[w, n])
# Plot a single sample.
ax.plot(x, gp.sample_conditional(y, x), "k", alpha=0.3)
ax.set_xlim(min(t), 2025.0)
ax.set_ylim(min(y), 420.0)
fig.savefig("../_static/hyper/mcmc.png", dpi=150)
|
mit
|
mgahsan/QuantEcon.py
|
examples/lq_permanent_1.py
|
7
|
1664
|
"""
Filename: lq_permanent_1.py
Authors: John Stachurski and Thomas J. Sargent
A permanent income / life-cycle model with iid income
"""
import numpy as np
import matplotlib.pyplot as plt
from quantecon import LQ
# == Model parameters == #
r = 0.05
beta = 1 / (1 + r)
T = 45
c_bar = 2
sigma = 0.25
mu = 1
q = 1e6
# == Formulate as an LQ problem == #
Q = 1
R = np.zeros((2, 2))
Rf = np.zeros((2, 2))
Rf[0, 0] = q
A = [[1 + r, -c_bar + mu],
[0, 1]]
B = [[-1],
[0]]
C = [[sigma],
[0]]
# == Compute solutions and simulate == #
lq = LQ(Q, R, A, B, C, beta=beta, T=T, Rf=Rf)
x0 = (0, 1)
xp, up, wp = lq.compute_sequence(x0)
# == Convert back to assets, consumption and income == #
assets = xp[0, :] # a_t
c = up.flatten() + c_bar # c_t
income = wp[0, 1:] + mu # y_t
# == Plot results == #
n_rows = 2
fig, axes = plt.subplots(n_rows, 1, figsize=(12, 10))
plt.subplots_adjust(hspace=0.5)
for i in range(n_rows):
axes[i].grid()
axes[i].set_xlabel(r'Time')
bbox = (0., 1.02, 1., .102)
legend_args = {'bbox_to_anchor': bbox, 'loc': 3, 'mode': 'expand'}
p_args = {'lw': 2, 'alpha': 0.7}
axes[0].plot(list(range(1, T+1)), income, 'g-', label="non-financial income",
**p_args)
axes[0].plot(list(range(T)), c, 'k-', label="consumption", **p_args)
axes[0].legend(ncol=2, **legend_args)
axes[1].plot(list(range(1, T+1)), np.cumsum(income - mu), 'r-',
label="cumulative unanticipated income", **p_args)
axes[1].plot(list(range(T+1)), assets, 'b-', label="assets", **p_args)
axes[1].plot(list(range(T)), np.zeros(T), 'k-')
axes[1].legend(ncol=2, **legend_args)
plt.show()
|
bsd-3-clause
|
commaai/panda
|
tests/safety/libpandasafety_py.py
|
1
|
2258
|
import os
import subprocess
from cffi import FFI
can_dir = os.path.dirname(os.path.abspath(__file__))
libpandasafety_fn = os.path.join(can_dir, "libpandasafety.so")
subprocess.check_call([f"scons -u -j{os.cpu_count()} --test ."], shell=True, cwd=can_dir)
ffi = FFI()
ffi.cdef("""
typedef struct
{
uint32_t TIR; /*!< CAN TX mailbox identifier register */
uint32_t TDTR; /*!< CAN mailbox data length control and time stamp register */
uint32_t TDLR; /*!< CAN mailbox data low register */
uint32_t TDHR; /*!< CAN mailbox data high register */
} CAN_TxMailBox_TypeDef;
typedef struct
{
uint32_t RIR; /*!< CAN receive FIFO mailbox identifier register */
uint32_t RDTR; /*!< CAN receive FIFO mailbox data length control and time stamp register */
uint32_t RDLR; /*!< CAN receive FIFO mailbox data low register */
uint32_t RDHR; /*!< CAN receive FIFO mailbox data high register */
} CAN_FIFOMailBox_TypeDef;
typedef struct
{
uint32_t CNT;
} TIM_TypeDef;
void set_controls_allowed(bool c);
bool get_controls_allowed(void);
void set_unsafe_mode(int mode);
int get_unsafe_mode(void);
void set_relay_malfunction(bool c);
bool get_relay_malfunction(void);
void set_gas_interceptor_detected(bool c);
bool get_gas_interceptor_detetcted(void);
int get_gas_interceptor_prev(void);
bool get_gas_pressed_prev(void);
bool get_brake_pressed_prev(void);
void set_torque_meas(int min, int max);
int get_torque_meas_min(void);
int get_torque_meas_max(void);
void set_torque_driver(int min, int max);
int get_torque_driver_min(void);
int get_torque_driver_max(void);
void set_desired_torque_last(int t);
void set_rt_torque_last(int t);
void set_desired_angle_last(int t);
bool get_cruise_engaged_prev(void);
bool get_vehicle_moving(void);
int get_hw_type(void);
void set_timer(uint32_t t);
int safety_rx_hook(CAN_FIFOMailBox_TypeDef *to_send);
int safety_tx_hook(CAN_FIFOMailBox_TypeDef *to_push);
int safety_fwd_hook(int bus_num, CAN_FIFOMailBox_TypeDef *to_fwd);
int set_safety_hooks(uint16_t mode, int16_t param);
void init_tests(void);
void init_tests_honda(void);
void set_honda_fwd_brake(bool);
void set_honda_alt_brake_msg(bool);
void set_honda_bosch_long(bool c);
int get_honda_hw(void);
""")
libpandasafety = ffi.dlopen(libpandasafety_fn)
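# Illustrative sketch (not from the upstream project): when this file is run
# directly rather than imported by the safety tests, poke a couple of the hooks
# declared in the cdef above to show how the cffi wrapper is driven.
if __name__ == "__main__":
    libpandasafety.set_controls_allowed(True)
    print("controls_allowed:", libpandasafety.get_controls_allowed())
    libpandasafety.set_unsafe_mode(0)
    print("unsafe_mode:", libpandasafety.get_unsafe_mode())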
|
mit
|
TobiasMue/paparazzi
|
sw/tools/tcp_aircraft_server/phoenix/__init__.py
|
86
|
4470
|
#Copyright 2014, Antoine Drouin
"""
Phoenix is a Python library for interacting with Paparazzi
"""
import math
"""
Unit conversions
"""
def rad_of_deg(d): return d/180.*math.pi
def deg_of_rad(r): return r*180./math.pi
def rps_of_rpm(r): return r*2.*math.pi/60.
def rpm_of_rps(r): return r/2./math.pi*60.
def m_of_inch(i): return i*0.0254
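# Illustrative sketch (not part of the upstream module): a few sanity checks for
# the conversion helpers above. The helper name is hypothetical.
def _check_unit_conversions():
    assert abs(rad_of_deg(180.) - math.pi) < 1e-12
    assert abs(deg_of_rad(math.pi) - 180.) < 1e-12
    assert abs(rpm_of_rps(rps_of_rpm(60.)) - 60.) < 1e-9
    assert abs(m_of_inch(1.) - 0.0254) < 1e-12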
"""
Plotting
"""
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
my_title_spec = {'color' : 'k', 'fontsize' : 20 }
def save_if(filename):
if filename: matplotlib.pyplot.savefig(filename, dpi=80)
def prepare_fig(fig=None, window_title=None, figsize=(20.48, 10.24), margins=None):
    if fig is None:
fig = plt.figure(figsize=figsize)
# else:
# plt.figure(fig.number)
if margins:
left, bottom, right, top, wspace, hspace = margins
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top,
hspace=hspace, wspace=wspace)
if window_title:
fig.canvas.set_window_title(window_title)
return fig
def decorate(ax, title=None, xlab=None, ylab=None, legend=None, xlim=None, ylim=None):
ax.xaxis.grid(color='k', linestyle='-', linewidth=0.2)
ax.yaxis.grid(color='k', linestyle='-', linewidth=0.2)
if xlab:
ax.xaxis.set_label_text(xlab)
if ylab:
ax.yaxis.set_label_text(ylab)
if title:
ax.set_title(title, my_title_spec)
    if legend is not None:
        ax.legend(legend, loc='best')
    if xlim is not None:
        ax.set_xlim(xlim[0], xlim[1])
    if ylim is not None:
ax.set_ylim(ylim[0], ylim[1])
"""
Messages
"""
#: dictionary mapping the C type to its length in bytes (e.g char -> 1)
TYPE_TO_LENGTH_MAP = {
"char" : 1,
"uint8" : 1,
"int8" : 1,
"uint16" : 2,
"int16" : 2,
"uint32" : 4,
"int32" : 4,
"float" : 4,
"double" : 8,
}
#: dictionary mapping the C type to correct format string
TYPE_TO_PRINT_MAP = {
float : "%f",
str : "%s",
chr : "%c",
int : "%d"
}
ACID_ALL = 0xFF
ACID_TEST = 0xFE
ACID_GROUNDSTATION = 0xFD
#: dictionary mapping debug types to format characters
DEBUG_MESSAGES = {
"DEBUG_UINT8" : "%d",
"DEBUG_INT32" : "%d",
"DEBUG_FLOAT" : "%#f"
}
"""
Binary logs
See format description in sw/airborne/subsystems/datalink/fms_link.c
"""
import struct
def hex_of_bin(b): return ' '.join( [ "%02X" % ord( x ) for x in b ] )
import pdb
def read_binary_log(filename, tick_freq = 2*512.):
f = open(filename, "rb")
d = f.read()
packet_header_len = 6
msg_header_len = 2
def read_packet(d, packet_start):
payload_start = packet_start+packet_header_len
timestamp, payload_len = struct.unpack("IH", d[packet_start:payload_start])
msgs = read_packet_payload(d, payload_start, payload_len)
next_packet = payload_start+payload_len+2
return timestamp, msgs, next_packet
def read_packet_payload(d, s, l):
msgs = []
packet_end = s+l; msg_start = s
while msg_start<packet_end:
payload_start = msg_start+msg_header_len
msg_len, msg_id = struct.unpack("BB", d[msg_start:payload_start])
payload_end = payload_start+msg_len
msg_payload = d[payload_start:payload_end]
msgs.append([msg_id, msg_payload])
#print msg_id, msg_len, hex_of_bin(msg_payload)
msg_start = payload_end
return msgs
packets = []
packet_start=0
while packet_start<len(d):
timestamp, msgs, next_packet = read_packet(d, packet_start)
packets.append([timestamp/tick_freq, msgs])
#print timestamp, msgs
packet_start = next_packet
f.close()
return packets
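# Illustrative sketch (not part of the upstream module): summarising the packets
# returned by read_binary_log. Both the helper name and the log file name are
# hypothetical.
def _example_summarize_log(filename="example_log.bin"):
    packets = read_binary_log(filename)
    n_msgs = sum(len(msgs) for t, msgs in packets)
    duration = packets[-1][0] - packets[0][0] if packets else 0.
    return n_msgs, duration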
def extract_from_binary_log(protocol, packets, msg_names, t_min=None, t_max=None):
ret = [{'time':[], 'data':[]} for m in msg_names]
    if t_min is None: t_min = packets[0][0]
    if t_max is None: t_max = packets[-1][0]
for t, msgs in packets:
if t>= t_min and t<= t_max:
for id, payload in msgs:
m = protocol.get_message_by_id('telemetry', id)
                try: i = msg_names.index(m.name)
                except ValueError: continue
                ret[i]['time'].append(t); ret[i]['data'].append(m.unpack_scaled_values(payload))
return ret
|
gpl-2.0
|
swharden/SWHLab
|
doc/uses/EPSCs-and-IPSCs/variance method/2016-12-17 02 graphTime.py
|
1
|
3764
|
import os
import sys
sys.path.append("../../../../")
import swhlab
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
import time
class ABF2(swhlab.ABF):
def phasicTonic(self,m1=None,m2=None,chunkMs=50,quietPercentile=10,
histResolution=.5,plotToo=False,rmsExpected=5):
"""
chunkMs should be ~50 ms or greater.
        Bin sizes must be equal to or multiples of the data resolution.
        Transients smaller than the expected RMS will be silenced.
"""
# prepare sectioning values to be used later
m1=0 if m1 is None else m1*self.pointsPerSec
        m2=len(self.sweepY) if m2 is None else m2*self.pointsPerSec
m1,m2=int(m1),int(m2)
# prepare histogram values to be used later
padding=200 # pA or mV of maximum expected deviation
chunkPoints=int(chunkMs*self.pointsPerMs)
histBins=int((padding*2)/histResolution)
# center the data at 0 using peak histogram, not the mean
Y=self.sweepY[m1:m2]
hist,bins=np.histogram(Y,bins=2*padding)
Yoffset=bins[np.where(hist==max(hist))[0][0]]
Y=Y-Yoffset # we don't have to, but PDF math is easier
# calculate all histogram
nChunks=int(len(Y)/chunkPoints)
hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))
hist=hist/len(Y) # count as a fraction of total
Xs=bins[1:]
# get baseline data from chunks with smallest variance
chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))
variances=np.var(chunks,axis=1)
percentiles=np.empty(len(variances))
for i,variance in enumerate(variances):
percentiles[i]=sorted(variances).index(variance)/len(variances)*100
blData=chunks[np.where(percentiles<=quietPercentile)[0]].flatten()
# generate the standard curve and pull it to the histogram height
sigma=np.sqrt(np.var(blData))
center=np.average(blData)+histResolution/2
blCurve=mlab.normpdf(Xs,center,sigma)
blCurve=blCurve*max(hist)/max(blCurve)
# determine the phasic current by subtracting-out the baseline
diff=hist-blCurve
# manually zero-out data which we expect to be within the RMS range
ignrCenter=len(Xs)/2
ignrPad=rmsExpected/histResolution
        ignr1,ignr2=int(ignrCenter-ignrPad),int(ignrCenter+ignrPad)
        diff[ignr1:ignr2]=0
        return diff/len(Y)*self.pointsPerSec # charge/sec
if __name__=="__main__":
#abfPath=r"X:\Data\2P01\2016\2016-09-01 PIR TGOT"
abfPath=r"C:\Users\scott\Documents\important\demodata"
abf=ABF2(os.path.join(abfPath,"16d14036.abf"))
t=time.perf_counter()
Xs=np.arange(abf.sweeps)*abf.sweepLength
pos,neg=np.zeros(len(Xs)),np.zeros(len(Xs))
for sweep in abf.setsweeps():
phasic=abf.phasicTonic(.75)
neg[sweep],pos[sweep]=np.sum(np.split(phasic,2),1)
t=time.perf_counter()-t
plt.figure(figsize=(10,5))
plt.grid()
plt.title("analysis of %s completed in %.02f S"%(abf.ID,t))
plt.plot(Xs,pos,'.',color='b',alpha=.3)
plt.plot(Xs,swhlab.common.lowpass(pos),'-',color='b',alpha=.5,lw=5,label="upward")
plt.plot(Xs,neg,'.',color='r',alpha=.3)
plt.plot(Xs,swhlab.common.lowpass(neg),'-',color='r',alpha=.5,lw=5,label="downward")
for sweep in abf.comment_times:
plt.axvline(sweep,lw=5,alpha=.5,color='g',ls='--')
plt.axhline(0,color='k',lw=3,alpha=.5)
plt.xlabel("time (secods)")
plt.ylabel("ms * pA / sec")
plt.legend(loc='upper left',shadow=True)
plt.margins(0,.1)
plt.show()
print("DONE")
|
mit
|
DonBeo/scikit-learn
|
sklearn/linear_model/tests/test_least_angle.py
|
11
|
15904
|
from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings, assert_warns_message
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
    # property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
if __name__ == '__main__':
import nose
nose.runmodule()
|
bsd-3-clause
|
Windy-Ground/scikit-learn
|
sklearn/datasets/tests/test_samples_generator.py
|
181
|
15664
|
from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
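# Editor's note: a minimal, hedged usage sketch (not part of the original test
# suite) of the make_regression coefficient recovery exercised by the tests
# above; the helper name and sample sizes are illustrative assumptions.
def _demo_make_regression_coef_recovery():
    import numpy as np
    from sklearn.datasets import make_regression

    X, y, coef = make_regression(n_samples=200, n_features=10, n_informative=3,
                                 coef=True, noise=1.0, random_state=0)
    n_informative = int(np.sum(coef != 0.0))           # only informative features get non-zero coefs
    residual_std = float(np.std(y - np.dot(X, coef)))  # should be close to the noise level (1.0)
    return n_informative, residual_std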
|
bsd-3-clause
|
zorroblue/scikit-learn
|
examples/feature_selection/plot_rfe_with_cross_validation.py
|
161
|
1380
|
"""
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
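# Editor's note: a short hedged follow-up (not in the original example) showing
# how the fitted RFECV object can be inspected and used to reduce X; support_,
# ranking_ and transform are standard RFECV attributes/methods.
print("Selected feature mask: %s" % rfecv.support_)
print("Feature ranking      : %s" % rfecv.ranking_)
X_reduced = rfecv.transform(X)  # keep only the selected columns
print("Reduced data shape   : %s" % (X_reduced.shape,))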
|
bsd-3-clause
|
0x0all/scikit-learn
|
examples/linear_model/plot_sgd_comparison.py
|
167
|
1659
|
"""
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
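# Editor's note: a hedged standalone sketch (not in the original example) of the
# error-rate computation used in the loop above: each curve point is
# 1 - mean(correct predictions), averaged over `rounds` random splits.
_y_true = np.array([0, 1, 1, 2, 2])
_y_pred = np.array([0, 1, 2, 2, 2])
_error_rate = 1 - np.mean(_y_pred == _y_true)  # one wrong out of five -> 0.2
assert abs(_error_rate - 0.2) < 1e-12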
|
bsd-3-clause
|
kiyo-masui/burst_search
|
burst_search/guppi.py
|
1
|
5526
|
"""Driver scripts and IO for Greenbank GUPPI data.
"""
import numpy as np
# import matplotlib.pyplot as plt
try:
import astropy.io.fits as pyfits
except ImportError:
import pyfits
from . import datasource
from . import manager
from . import preprocess
# GUPPI IO
# --------
class FileSource(datasource.DataSource):
def __init__(self, filename, block=30., overlap=8., **kwargs):
super(FileSource, self).__init__(
source=filename,
block=block,
overlap=overlap,
**kwargs
)
# Read the headers
hdulist = pyfits.open(filename)
mheader = hdulist[0].header
dheader = hdulist[1].header
if mheader['CAL_FREQ']:
cal_period = 1. / mheader['CAL_FREQ']
self._cal_period_samples = int(round(cal_period / dheader['TBIN']))
else:
self._cal_period_samples = 0
self._delta_t_native = dheader['TBIN']
self._nfreq = dheader['NCHAN']
self._freq0 = mheader['OBSFREQ'] - mheader['OBSBW'] / 2.
self._delta_f = dheader['CHAN_BW']
self._mjd = mheader['STT_IMJD']
self._start_time = (mheader['STT_SMJD'] + mheader['STT_OFFS'])
ntime_record, npol, nfreq, one = eval(dheader["TDIM17"])[::-1]
self._ntime_record = ntime_record
hdulist.close()
# Initialize blocking parameters.
record_len = self._ntime_record * self._delta_t_native
self._nrecords_block = int(np.ceil(block / record_len))
self._nrecords_overlap = int(np.ceil(overlap / record_len))
self._next_start_record = 0
@property
def nblocks_left(self):
nrecords_left = get_nrecords(self._source) - self._next_start_record
return int(np.ceil(float(nrecords_left)
/ (self._nrecords_block - self._nrecords_overlap)))
@property
def nblocks_fetched(self):
return (self._next_start_record //
(self._nrecords_block - self._nrecords_overlap))
def get_next_block_native(self):
start_record = self._next_start_record
if self.nblocks_left == 0:
raise StopIteration()
t0 = start_record * self._ntime_record * self._delta_t_native
t0 += self._delta_t_native / 2
hdulist = pyfits.open(self._source)
data = read_records(
hdulist,
self._next_start_record,
self._next_start_record + self._nrecords_block,
)
hdulist.close()
self._next_start_record += (self._nrecords_block
- self._nrecords_overlap)
return t0, data
@property
def cal_period_samples(self):
return self._cal_period_samples
# Driver class
# ------------
class Manager(manager.Manager):
datasource_class = FileSource
def preprocess(self, t0, data):
"""Preprocess the data.
Preprocessing includes simulation.
"""
preprocess.sys_temperature_bandpass(data)
self.simulate(t0, data)
cal_period = self.datasource.cal_period_samples
if cal_period:
preprocess.remove_periodic(data, cal_period)
block_ind = self.datasource.nblocks_fetched
# Preprocess.
# preprocess.sys_temperature_bandpass(data)
if False and block_ind in self._sim_source.coarse_event_schedule():
# Do simulation
sim_events = self._sim_source.generate_events(block_ind)
data += sim_events[:, 0:data.shape[1]]
preprocess.remove_outliers(data, 5, 128)
ntime_pre_filter = data.shape[1]
data = preprocess.highpass_filter(data, manager.HPF_WIDTH
/ self.datasource.delta_t)
# This changes t0 by half a window width.
t0 -= (ntime_pre_filter - data.shape[1]) / 2 * self.datasource.delta_t
preprocess.remove_outliers(data, 5)
preprocess.remove_noisy_freq(data, 3)
preprocess.remove_bad_times(data, 2)
preprocess.remove_continuum_v2(data)
preprocess.remove_noisy_freq(data, 3)
return t0, data
# IO Helper functions
# -------------------
def read_records(hdulist, start_record=0, end_record=None):
"""Read and format records from GUPPI PSRFITS file."""
nrecords = len(hdulist[1].data)
if end_record is None or end_record > nrecords:
end_record = nrecords
nrecords_read = end_record - start_record
ntime_record, npol, nfreq, one = hdulist[1].data[0]["DATA"].shape
out_data = np.empty((nfreq, nrecords_read, ntime_record), dtype=np.float32)
    for ii in range(nrecords_read):
# Read the record.
full_record = hdulist[1].data[start_record + ii]
record = full_record["DATA"]
scals = full_record["DAT_SCL"]
offs = full_record["DAT_OFFS"]
# Interpret as unsigned int (for Stokes I only).
record = record.view(dtype=np.uint8)
offs.shape = (npol, nfreq)
scals.shape = (npol, nfreq)
# Select stokes I and copy.
formated_data = np.transpose(record[:, 0, :, 0]) * scals[0, :, None]
formated_data += offs[0, :, None]
out_data[:, ii, :] = formated_data
out_data.shape = (nfreq, nrecords_read * ntime_record)
return out_data
def get_nrecords(filename):
hdulist = pyfits.open(filename, 'readonly')
nrecords = len(hdulist[1].data)
hdulist.close()
return nrecords
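# Editor's note: a hedged, self-contained sketch (not part of the original
# module) of the per-record scaling performed in read_records above: raw 8-bit
# samples are scaled by the per-channel DAT_SCL values and shifted by DAT_OFFS
# for Stokes I. Array sizes here are illustrative.
def _demo_record_scaling():
    npol, nfreq, ntime = 4, 8, 16
    raw = np.random.randint(0, 256, size=(ntime, npol, nfreq, 1)).astype(np.uint8)
    scals = np.random.rand(npol, nfreq).astype(np.float32)
    offs = np.random.rand(npol, nfreq).astype(np.float32)
    # Same arithmetic as read_records for Stokes I (polarisation index 0).
    stokes_i = np.transpose(raw[:, 0, :, 0]) * scals[0, :, None] + offs[0, :, None]
    return stokes_i.shape  # (nfreq, ntime), matching the per-record layout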
|
gpl-2.0
|
powerjg/gem5-ci-test
|
util/dram_sweep_plot.py
|
10
|
6666
|
#!/usr/bin/env python2
# Copyright (c) 2014 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
try:
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
except ImportError:
print "Failed to import matplotlib and numpy"
exit(-1)
import sys
import re
# Determine the parameters of the sweep from the simout output, and
# then parse the stats and plot the 3D surface corresponding to the
# different combinations of parallel banks, and stride size, as
# generated by the config/dram/sweep.py script
def main():
if len(sys.argv) != 3:
print "Usage: ", sys.argv[0], "-u|p|e <simout directory>"
exit(-1)
if len(sys.argv[1]) != 2 or sys.argv[1][0] != '-' or \
not sys.argv[1][1] in "upe":
print "Choose -u (utilisation), -p (total power), or -e " \
"(power efficiency)"
exit(-1)
# Choose the appropriate mode, either utilisation, total power, or
# efficiency
mode = sys.argv[1][1]
try:
stats = open(sys.argv[2] + '/stats.txt', 'r')
except IOError:
print "Failed to open ", sys.argv[2] + '/stats.txt', " for reading"
exit(-1)
try:
simout = open(sys.argv[2] + '/simout', 'r')
except IOError:
print "Failed to open ", sys.argv[2] + '/simout', " for reading"
exit(-1)
# Get the burst size, number of banks and the maximum stride from
# the simulation output
got_sweep = False
for line in simout:
match = re.match("DRAM sweep with "
"burst: (\d+), banks: (\d+), max stride: (\d+)", line)
if match:
burst_size = int(match.groups(0)[0])
banks = int(match.groups(0)[1])
max_size = int(match.groups(0)[2])
got_sweep = True
simout.close()
if not got_sweep:
print "Failed to establish sweep details, ensure simout is up-to-date"
exit(-1)
# Now parse the stats
peak_bw = []
bus_util = []
avg_pwr = []
for line in stats:
match = re.match(".*busUtil\s+(\d+\.\d+)\s+#.*", line)
if match:
bus_util.append(float(match.groups(0)[0]))
match = re.match(".*peakBW\s+(\d+\.\d+)\s+#.*", line)
if match:
peak_bw.append(float(match.groups(0)[0]))
match = re.match(".*averagePower\s+(\d+\.?\d*)\s+#.*", line)
if match:
avg_pwr.append(float(match.groups(0)[0]))
stats.close()
# Sanity check
if not (len(peak_bw) == len(bus_util) and len(bus_util) == len(avg_pwr)):
print "Peak bandwidth, bus utilisation, and average power do not match"
exit(-1)
# Collect the selected metric as our Z-axis, we do this in a 2D
# grid corresponding to each iteration over the various stride
# sizes.
z = []
zs = []
i = 0
for j in range(len(peak_bw)):
if mode == 'u':
z.append(bus_util[j])
elif mode == 'p':
z.append(avg_pwr[j])
elif mode == 'e':
# avg_pwr is in mW, peak_bw in MiByte/s, bus_util in percent
z.append(avg_pwr[j] / (bus_util[j] / 100.0 * peak_bw[j] / 1000.0))
else:
print "Unexpected mode %s" % mode
exit(-1)
i += 1
# If we have completed a sweep over the stride sizes,
# start anew
if i == max_size / burst_size:
zs.append(z)
z = []
i = 0
# We should have a 2D grid with as many columns as banks
if len(zs) != banks:
print "Unexpected number of data points in stats output"
exit(-1)
fig = plt.figure()
ax = fig.gca(projection='3d')
X = np.arange(burst_size, max_size + 1, burst_size)
Y = np.arange(1, banks + 1, 1)
X, Y = np.meshgrid(X, Y)
# the values in the util are banks major, so we see groups for each
# stride size in order
Z = np.array(zs)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
# Change the tick frequency to 64
start, end = ax.get_xlim()
ax.xaxis.set_ticks(np.arange(start, end + 1, 64))
ax.set_xlabel('Bytes per activate')
ax.set_ylabel('Banks')
if mode == 'u':
ax.set_zlabel('Utilisation (%)')
elif mode == 'p':
ax.set_zlabel('Power (mW)')
elif mode == 'e':
ax.set_zlabel('Power efficiency (mW / GByte / s)')
# Add a colorbar
fig.colorbar(surf, shrink=0.5, pad=.1, aspect=10)
plt.show()
if __name__ == "__main__":
main()
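# Editor's note: a hedged sketch (not in the original script) of the simout
# parsing done in main(), applied to a synthetic line; written to run under
# both Python 2 and Python 3.
def _demo_parse_sweep_line():
    line = "DRAM sweep with burst: 64, banks: 8, max stride: 1024"
    match = re.match("DRAM sweep with "
                     "burst: (\d+), banks: (\d+), max stride: (\d+)", line)
    assert match is not None
    burst_size, banks, max_size = [int(g) for g in match.groups()]
    return burst_size, banks, max_size  # (64, 8, 1024)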
|
bsd-3-clause
|
nburn42/tensorflow
|
tensorflow/contrib/timeseries/examples/known_anomaly.py
|
14
|
7880
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of using an exogenous feature to ignore a known anomaly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from os import path
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/changepoints.csv")
def state_space_esitmator(exogenous_feature_columns):
"""Constructs a StructuralEnsembleRegressor."""
def _exogenous_update_condition(times, features):
del times # unused
# Make exogenous updates sparse by setting an update condition. This in
# effect allows missing exogenous features: if the condition evaluates to
# False, no update is performed. Otherwise we sometimes end up with "leaky"
# updates which add unnecessary uncertainty to the model even when there is
# no changepoint.
return tf.equal(tf.squeeze(features["is_changepoint"], axis=-1), "yes")
return (
tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=12,
# Extract a smooth period by constraining the number of latent values
# being cycled between.
cycle_num_latent_values=3,
num_features=1,
exogenous_feature_columns=exogenous_feature_columns,
exogenous_update_condition=_exogenous_update_condition),
# Use truncated backpropagation with a window size of 64, batching
# together 4 of these windows (random offsets) per training step. Training
# with exogenous features often requires somewhat larger windows.
4, 64)
def autoregressive_esitmator(exogenous_feature_columns):
input_window_size = 8
output_window_size = 2
return (
tf.contrib.timeseries.ARRegressor(
periodicities=12,
num_features=1,
input_window_size=input_window_size,
output_window_size=output_window_size,
exogenous_feature_columns=exogenous_feature_columns),
64, input_window_size + output_window_size)
def train_and_evaluate_exogenous(
estimator_fn, csv_file_name=_DATA_FILE, train_steps=300):
"""Training, evaluating, and predicting on a series with changepoints."""
# Indicate the format of our exogenous feature, in this case a string
# representing a boolean value.
string_feature = tf.feature_column.categorical_column_with_vocabulary_list(
key="is_changepoint", vocabulary_list=["no", "yes"])
# Specify the way this feature is presented to the model, here using a one-hot
# encoding.
one_hot_feature = tf.feature_column.indicator_column(
categorical_column=string_feature)
estimator, batch_size, window_size = estimator_fn(
exogenous_feature_columns=[one_hot_feature])
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
# Indicate the format of our CSV file. First we have two standard columns,
# one for times and one for values. The third column is a custom exogenous
# feature indicating whether each timestep is a changepoint. The
# changepoint feature name must match the string_feature column name
# above.
column_names=(tf.contrib.timeseries.TrainEvalFeatures.TIMES,
tf.contrib.timeseries.TrainEvalFeatures.VALUES,
"is_changepoint"),
# Indicate dtypes for our features.
column_dtypes=(tf.int64, tf.float32, tf.string),
# This CSV has a header line; here we just ignore it.
skip_header_lines=1)
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=batch_size, window_size=window_size)
estimator.train(input_fn=train_input_fn, steps=train_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Create an input_fn for prediction, with a simulated changepoint. Since all
# of the anomalies in the training data are explained by the exogenous
# feature, we should get relatively confident predictions before the indicated
# changepoint (since we are telling the model that no changepoint exists at
# those times) and relatively uncertain predictions after.
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features={
"is_changepoint": [["no"] * 49 + ["yes"] + ["no"] * 50]})))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
# Indicate the locations of the changepoints for plotting vertical lines.
anomaly_locations = []
with open(csv_file_name, "r") as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
if row["is_changepoint"] == "yes":
anomaly_locations.append(int(row["time"]))
anomaly_locations.append(predictions["times"][49])
return (times, observed, all_times, mean, upper_limit, lower_limit,
anomaly_locations)
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit, anomaly_locations):
"""Plot the time series and anomalies in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.axvline(anomaly_locations[0], linestyle="dotted", label="changepoints")
for anomaly_location in anomaly_locations[1:]:
pyplot.axvline(anomaly_location, linestyle="dotted")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
                      alpha=0.2)
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Ignoring a known anomaly (state space)",
*train_and_evaluate_exogenous(
estimator_fn=state_space_esitmator))
make_plot("Ignoring a known anomaly (autoregressive)",
*train_and_evaluate_exogenous(
estimator_fn=autoregressive_esitmator, train_steps=3000))
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
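# Editor's note: a hedged, framework-free sketch (not part of the original
# example) of the uncertainty band built above: the plot shades
# mean +/- sqrt(variance) around the forecast. The array values are made up.
def _demo_uncertainty_band():
  times = np.arange(10)
  mean = np.linspace(0.0, 1.0, 10)
  variance = np.full(10, 0.04)
  upper, lower = mean + np.sqrt(variance), mean - np.sqrt(variance)
  if HAS_MATPLOTLIB:
    pyplot.figure()
    pyplot.plot(times, mean, "r", label="forecast")
    pyplot.fill_between(times, lower, upper, color="grey", alpha=0.2)
  return float(upper[0] - lower[0])  # band width = 2 * sigma = 0.4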
|
apache-2.0
|
survey-methods/samplics
|
src/samplics/datasets/datasets.py
|
1
|
5884
|
from __future__ import annotations
from typing import Optional
from os.path import dirname, join
import pandas as pd
def _load_dataset(
file_name: str,
colnames: Optional[list],
name: str,
description: str,
design: dict,
source: str,
) -> None:
module_path = dirname(__file__)
file_path = join(module_path, "data", file_name)
df = pd.read_csv(file_path)
if colnames is not None:
df = df[colnames]
nrows, ncols = df.shape
return {
"name": name,
"description": description,
"nrows": nrows,
"ncols": ncols,
"data": df,
"design": design,
"source": source,
}
def load_psu_frame():
name = "PSU Frame"
    description = "A simulated census dataset."
design = {}
source = ""
return _load_dataset(
"psu_frame.csv",
colnames=None,
name=name,
description=description,
design=design,
source=source,
)
def load_psu_sample():
colnames = ["cluster", "region", "psu_prob"]
name = "PSU Sample"
description = "The PSU sample obtained from the simulated PSU frame."
design = {}
source = ""
return _load_dataset(
"psu_sample.csv",
colnames=colnames,
name=name,
description=description,
design=design,
source=source,
)
def load_ssu_sample():
colnames = ["cluster", "household", "ssu_prob"]
name = "SSU Sample"
description = "The SSU sample obtained from the simulated SSU frame."
design = {}
source = ""
return _load_dataset(
"ssu_sample.csv",
colnames=colnames,
name=name,
description=description,
design=design,
source=source,
)
def load_nhanes2():
colnames = [
"stratid",
"psuid",
"race",
"highbp",
"highlead",
"zinc",
"diabetes",
"finalwgt",
]
name = "NHANES II Subsample"
    description = "A subset of NHANES II data. This file is not meant to be representative of NHANES II. It is just a subset to illustrate the syntax in this tutorial."
design = {}
source = ""
return _load_dataset(
"nhanes2.csv",
colnames=colnames,
name=name,
description=description,
design=design,
source=source,
)
def load_nhanes2brr():
colnames = None
name = "NHANES II Subsample with bootstrap weights"
    description = "A subset of NHANES II data with bootstrap weights. This file is not meant to be representative of NHANES II. It is just a subset to illustrate the syntax in this tutorial."
design = {}
source = ""
return _load_dataset(
"nhanes2brr_subset.csv",
colnames=colnames,
name=name,
description=description,
design=design,
source=source,
)
def load_nhanes2jk():
colnames = None
name = "NHANES II Subsample with jackknife weights"
    description = "A subset of NHANES II data with jackknife weights. This file is not meant to be representative of NHANES II. It is just a subset to illustrate the syntax in this tutorial."
design = {}
source = ""
return _load_dataset(
"nhanes2jk_subset.csv",
colnames=colnames,
name=name,
description=description,
design=design,
source=source,
)
def load_nmhis():
colnames = None
name = "NMIHS Subsample"
    description = "A subset of NMHIS data. This file is not meant to be representative of NMHIS. It is just a subset to illustrate the syntax in this tutorial."
design = {}
source = ""
return _load_dataset(
"nmihs_subset.csv",
colnames=colnames,
name=name,
description=description,
design=design,
source=source,
)
def load_auto():
colnames = None
name = "Auto Sample"
description = "The Auto sample data."
design = {}
source = ""
return _load_dataset(
"auto.csv",
colnames=colnames,
name=name,
description=description,
design=design,
source=source,
)
def load_birth():
colnames = None
name = "Birth Sample"
description = "The Birth sample data."
design = {}
source = ""
return _load_dataset(
"birth.csv",
colnames=colnames,
name=name,
description=description,
design=design,
source=source,
)
def load_county_crop():
colnames = ["county_id", "corn_area", "soybeans_area", "corn_pixel", "soybeans_pixel"]
name = "County Crop Sample"
description = "The County Crop Areas sample data."
design = {}
source = ""
return _load_dataset(
"countycrop.csv",
colnames=colnames,
name=name,
description=description,
design=design,
source=source,
)
def load_county_crop_means():
colnames = [
"county_id",
"samp_segments",
"pop_segments",
"ave_corn_pixel",
"ave_soybeans_pixel",
]
name = "County Crop Area Means"
description = "The County Crop Area Means data."
design = {}
source = ""
return _load_dataset(
"countycrop_means.csv",
colnames=colnames,
name=name,
description=description,
design=design,
source=source,
)
def load_expenditure_milk():
colnames = [
"major_area",
"small_area",
"samp_size",
"direct_est",
"std_error",
"coef_var",
]
name = "Expenditure on Milk"
description = "The expenditure on milk data."
design = {}
source = ""
return _load_dataset(
"expenditure_on_milk.csv",
colnames=colnames,
name=name,
description=description,
design=design,
source=source,
)
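# Editor's note: a hedged usage sketch (not part of the original module) showing
# the dictionary layout returned by the loaders above; the keys come directly
# from _load_dataset.
def _demo_load_psu_sample():
    psu_sample = load_psu_sample()
    summary = "{name}: {nrows} rows x {ncols} columns".format(
        name=psu_sample["name"],
        nrows=psu_sample["nrows"],
        ncols=psu_sample["ncols"],
    )
    return summary, psu_sample["data"].head()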
|
mit
|
alexeyignatiev/mkplot
|
cactus.py
|
1
|
3929
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
##
## cactus.py
##
## Created on: Jun 05, 2015
## Author: Alexey S. Ignatiev
## E-mail: [email protected]
##
#
#==============================================================================
import json
import matplotlib.pyplot as plt
from matplotlib import __version__ as mpl_version
import math
import numpy as np
import os
from plot import Plot
import six
#
#==============================================================================
class Cactus(Plot, object):
"""
Cactus plot class.
"""
def __init__(self, options):
"""
Cactus constructor.
"""
super(Cactus, self).__init__(options)
with open(self.def_path, 'r') as fp:
self.linestyles = json.load(fp)['cactus_linestyle']
def create(self, data):
"""
Does the plotting.
"""
# making lines
coords = []
for d in data:
coords.append(np.arange(1, len(d[1]) + 1)) # xs (separate for each line)
coords.append(np.array(sorted(d[1])))
lines = plt.plot(*coords, zorder=3)
# setting line styles
        if not self.byname:  # by default, assign first line to best tool
lmap = lambda i: i
else: # assign line styles by tool name
tnames = [(d[0], i) for i, d in enumerate(data)]
tnames.sort(key=lambda pair: pair[0])
tmap = {tn[1]: i for i, tn in enumerate(tnames)}
lmap = lambda i: tmap[i]
for i, l in enumerate(lines):
plt.setp(l, **self.linestyles[lmap(i) % len(self.linestyles)])
# turning the grid on
if not self.no_grid:
plt.grid(True, color=self.grid_color, ls=self.grid_style, lw=self.grid_width, zorder=1)
# axes limits
plt.xlim(self.x_min, self.x_max if self.x_max else math.ceil(max([d[2] for d in data]) / float(100)) * 100)
plt.ylim(self.y_min, self.y_max if self.y_max else self.timeout)
# axes labels
if self.x_label:
plt.xlabel(self.x_label)
else:
plt.xlabel('instances')
if self.y_label:
plt.ylabel(self.y_label)
else:
plt.ylabel('CPU time (s)')
# choosing logarithmic scales if needed
ax = plt.gca()
if self.x_log:
ax.set_xscale('log')
if self.y_log:
ax.set_yscale('log')
# setting ticks
# plt.xticks(np.arange(self.x_min, self.x_max + 1, 2))
# if not self.y_log:
# # plt.yticks(list(plt.yticks()[0]) + [self.timeout])
# ax.set_yticks(range(0, 2 * (int(self.y_max) if self.y_max else int(self.timeout)), 200))
# setting ticks font properties
# set_*ticklables() seems to be not needed in matplotlib 1.5.0
if float(mpl_version[:3]) < 1.5:
ax.set_xticklabels(ax.get_xticks(), self.f_props)
ax.set_yticklabels(ax.get_yticks(), self.f_props)
strFormatter = plt.FormatStrFormatter('%d')
logFormatter = plt.LogFormatterMathtext(base=10)
ax.xaxis.set_major_formatter(strFormatter if not self.x_log else logFormatter)
ax.yaxis.set_major_formatter(strFormatter if not self.y_log else logFormatter)
# making the legend
if self.lgd_loc != 'off':
lgtext = [d[0] for d in data]
lg = ax.legend(lines, lgtext, ncol=self.lgd_ncol, loc=self.lgd_loc, fancybox=self.lgd_fancy, shadow=self.lgd_shadow if self.lgd_alpha == 1.0 else False)
fr = lg.get_frame()
fr.set_lw(1)
fr.set_alpha(self.lgd_alpha)
fr.set_edgecolor('black')
# setting frame thickness
for i in six.itervalues(ax.spines):
i.set_linewidth(1)
plt.savefig(self.save_to, bbox_inches='tight', transparent=self.transparent)
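#
#==============================================================================
# Editor's note: a hedged, standalone sketch (not part of the original class) of
# the cactus-plot idea used in create(): for each tool, sort its runtimes and
# plot them against the number of instances solved. The input format here is an
# illustrative assumption (tool name -> list of runtimes).
def _demo_cactus(runtimes_by_tool):
    for name, runtimes in sorted(runtimes_by_tool.items()):
        ys = np.array(sorted(runtimes))
        xs = np.arange(1, len(ys) + 1)
        plt.plot(xs, ys, label=name, zorder=3)
    plt.xlabel('instances')
    plt.ylabel('CPU time (s)')
    plt.legend(loc='upper left')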
|
mit
|
MichaelAquilina/numpy
|
numpy/core/function_base.py
|
41
|
6518
|
from __future__ import division, absolute_import, print_function
__all__ = ['logspace', 'linspace']
from . import numeric as _nx
from .numeric import result_type, NaN
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop`].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
.. versionadded:: 1.9.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float
Only returned if `retstep` is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
div = (num - 1) if endpoint else num
# Convert float/complex array scalars to float, gh-3504
start = start * 1.
stop = stop * 1.
dt = result_type(start, stop, float(num))
if dtype is None:
dtype = dt
y = _nx.arange(0, num, dtype=dt)
if num > 1:
delta = stop - start
step = delta / div
if step == 0:
# Special handling for denormal numbers, gh-5437
y /= div
y *= delta
else:
y *= step
else:
# 0 and 1 item long sequences have an undefined step
step = NaN
y += start
if endpoint and num > 1:
y[-1] = stop
if retstep:
return y.astype(dtype, copy=False), step
else:
return y.astype(dtype, copy=False)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y).astype(dtype)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start, stop, num=num, endpoint=endpoint)
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype)
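# Editor's note: a hedged, self-contained check (not part of the original
# module) of the equivalence stated in the logspace Notes section above:
# logspace(start, stop) is base ** linspace(start, stop).
def _demo_logspace_equivalence():
    y = linspace(2.0, 3.0, num=4)
    direct = logspace(2.0, 3.0, num=4)
    return bool(_nx.allclose(_nx.power(10.0, y), direct))  # expected: True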
|
bsd-3-clause
|
MaxStrange/nlp
|
diplomacy/src/training.py
|
1
|
23713
|
"""
This is a front end module for running the program in training mode.
This module was used to train the models and evaluate them.
"""
import os
if not "SSH_CONNECTION" in os.environ:
# Disable annoying TF warnings when importing keras (which imports TF)
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import data
import itertools
import keras
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, LSTM
from keras.wrappers.scikit_learn import KerasClassifier
import matplotlib
if "SSH_CONNECTION" in os.environ:
matplotlib.use("agg")
import matplotlib.pyplot as plt
else:
try:
matplotlib.use("Qt5Agg")
except Exception:
print("WARNING: This will work best if you install PyQt5")
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pickle
import random
import scipy
from sklearn import decomposition, neighbors, svm, tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support, roc_curve, auc
from sklearn.model_selection import cross_val_score, StratifiedKFold, train_test_split
from sklearn.neural_network import MLPClassifier
random.seed(12345)
np.random.seed(12345)
np.set_printoptions(precision=2)
cached_Xs = None
cached_Ys = None
cached_ptd = None
cached_binary = True
X_validation_set = None
Y_validation_set = None
def _get_rnn_data(path_to_data=None, binary=True):
"""
"""
# Need to get whole relationships at a time along with labels that are simply whether this feature vector (which is a season)
# is the last turn of a betrayal relationship
# Dimension of Xs should be: (500, x, 10), where x varies from 3 to 10 (i.e., len of relationship)
Xs = [x for x in data.get_X_feed_rnn(path_to_data)]
if binary:
Ys = [y for y in data.get_Y_feed_binary_rnn(path_to_data)]
else:
assert False, "Not yet supported"
return Xs, Ys
def _get_xy(path_to_data=None, binary=True, upsample=True, replicate=False):
"""
Returns Xs, Ys, shuffled.
Keeps back a validation set that you can get via X_validation_set and Y_validation_set.
"""
global cached_Xs
global cached_Ys
global cached_ptd
global cached_binary
if cached_Xs is not None and cached_Ys is not None and cached_ptd == path_to_data and cached_binary == binary and upsample and not replicate:
return cached_Xs, cached_Ys
else:
print("Getting the data. This will take a moment...")
Xs = [x for x in data.get_X_feed(path_to_data, upsample=upsample, replicate=replicate)]
if binary:
Ys = np.array([y for y in data.get_Y_feed_binary(path_to_data, upsample=upsample)])
else:
Ys = np.array([y for y in data.get_Y_feed(Xs, path_to_data, upsample=upsample)])
Xs = np.array([x[1] for x in Xs])
# Shuffle
index_shuf = [i for i in range(len(Xs))]
random.shuffle(index_shuf)
Xs = np.array([Xs[i] for i in index_shuf])
Ys = np.array([Ys[i] for i in index_shuf])
assert(len(Xs) == len(Ys))
# Keep back validation set
global X_validation_set
global Y_validation_set
X_validation_set, Y_validation_set = data.get_validation_set(replicate=replicate)
print("Ones in validation set:", len([y for y in Y_validation_set if y == 1]))
print("Zeros in validation set:", len([y for y in Y_validation_set if y == 0]))
if upsample:
# Only cache upsampled data
cached_Xs = Xs
cached_Ys = Ys
cached_ptd = path_to_data
cached_binary = binary
return Xs, Ys
def plot_confusion_matrix(cm, classes, subplot, normalize=False, title="Confusion matrix", cmap=plt.cm.Blues):
"""
"""
plt.subplot(subplot)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title, fontsize=20, fontweight='bold')
# plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, fontsize=8)
plt.yticks(tick_marks, classes, fontsize=8)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j], fontsize=25,
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
#plt.tight_layout()
if subplot == 231 or subplot == 234:
plt.ylabel('True label', fontsize=15)
if subplot == 234 or subplot == 235 or subplot == 236:
plt.xlabel('Predicted label', fontsize=15)
def train_knn(path_to_data=None, path_to_save_model=None, load_model=False, path_to_load=None, binary=True, subplot=111, title=""):
"""
Trains a knn classifier on the dataset.
If no path_to_data is used, it will assume the default data directory.
If no path_to_save_model is provided, it will save to the local directory.
If load_model is True, it will load the model from the given location and resume training.
If binary is True, the model will be trained to simply detect whether, given three Seasons' worth of messages, there
will be a betrayal between these users in this order phase.
"""
print("Training the KNN with inverse weights...")
if load_model:
clf = load_model_from_path(path_to_load)
compute_confusion_matrix(clf, upsample=False, subplot=subplot, title=title, path_to_data=path_to_data, binary=binary)
else:
clf = neighbors.KNeighborsClassifier(n_neighbors=3, weights='distance')
clf = train_model(clf, cross_validate=True, conf_matrix=True, save_model_at_path=path_to_save_model, subplot=subplot, title=title)
return clf
def train_logregr(path_to_data=None, path_to_save_model=None, load_model=False, path_to_load=None, binary=True, subplot=111, title="", replicate=False):
"""
Trains a logistic regression model.
If no path_to_data is used, it will assume the default data directory.
If no path_to_save_model is provided, it will save to the local directory.
If load_model is True, it will load the model from the given location and resume training.
If binary is True, the model will be trained to simply detect whether, given three Seasons' worth of messages, there
will be a betrayal between these users in this order phase.
If replicate is True, this will attempt to train a model that corresponds to what the authors did.
"""
print("Training logistic regression model...")
if load_model:
clf = load_model_from_path(path_to_load)
compute_confusion_matrix(clf, upsample=False, subplot=subplot, title=title, path_to_data=path_to_data, binary=binary)
else:
clf = LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=0.1, fit_intercept=True,
intercept_scaling=1, class_weight='balanced', random_state=None, solver='liblinear', max_iter=200)
clf = train_model(clf, cross_validate=True, conf_matrix=True, save_model_at_path=path_to_save_model, subplot=subplot, title=title)
return clf
def train_rnn(path_to_data=None, path_to_save_model="rnn.hdf5", load_model=False, path_to_load="rnn.hdf5", binary=True, subplot=111, title=""):
"""
"""
def make_model(X):
model = Sequential()
model.add(LSTM(256, input_shape=X.shape[1:], dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))
return model
print("Training the RNN...")
print(" |-> Getting the data...")
X_train, y_train = _get_rnn_data(path_to_data, binary)
X_test, y_test = X_validation_set, Y_validation_set
def concat(Z):
temp = []
for z in Z:
temp += z
return temp
print(" |-> Concatenating the data for the RNN...")
X_train = np.array(concat(X_train))
X_test = np.array(concat(X_test))
y_train = np.array(concat(y_train))
y_test = np.array(concat(y_test))
print(" |-> X shape:", X_train.shape)
print(" |-> Y shape:", y_train.shape)
X_train = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
X_test = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))
print(" |-> X shape after reshape:", X_train.shape)
if load_model:
print(" |-> Loading saved model...")
model = keras.models.load_model(path_to_load)
else:
model = make_model(X_train)
print(" |-> Compiling...")
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
print(" |-> Fitting the model...")
checkpointer = ModelCheckpoint(filepath=path_to_save_model, verbose=1, save_best_only=True)
model.fit(X_train, y_train, batch_size=10, epochs=1000, verbose=2, validation_data=(X_test, y_test), callbacks=[checkpointer])
print(" |-> Evaluating the model...")
score = model.evaluate(X_test, y_test, verbose=1)
print("")
print(" |-> Loss:", score[0])
print(" |-> Accuracy:", score[1])
compute_confusion_matrix(model, upsample=False, subplot=subplot, title=title, round_data=True)
def train_mlp(path_to_data=None, path_to_save_model="mlp.hdf5", load_model=False, path_to_load="mlp.hdf5", binary=True, subplot=111, title=""):
"""
Trains a multilayer perceptron.
If no path_to_data is used, it will assume the default data directory.
If no path_to_save_model is provided, it will save to the local directory.
If load_model is True, it will load the model from the given location and resume training.
If binary is True, the model will be trained to simply detect whether, given three Seasons' worth of messages, there
will be a betrayal between these users in this order phase.
"""
print("Training the MLP...")
def make_model():
model = Sequential()
model.add(Dense(1024, input_dim=30, kernel_initializer='normal', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(256, kernel_initializer='normal', activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
return model
print(" |-> Getting the data...")
X_train, y_train = _get_xy(path_to_data, binary)
X_test = X_validation_set
y_test = Y_validation_set
if load_model:
print(" |-> Loading saved model...")
model = keras.models.load_model(path_to_load)
else:
model = make_model()
print(" |-> Compiling...")
model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
print(" |-> Fitting the model...")
checkpointer = ModelCheckpoint(filepath=path_to_save_model, verbose=1, save_best_only=True)
lr_reducer = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=50, min_lr=0.00001)
model.fit(X_train, y_train, batch_size=20, epochs=1000, verbose=2, validation_data=(X_test, y_test), callbacks=[checkpointer, lr_reducer])
print(" |-> Evaluating the model...")
score = model.evaluate(X_test, y_test, verbose=1)
print("")
print(" |-> Loss:", score[0])
print(" |-> Accuracy:", score[1])
compute_confusion_matrix(model, upsample=False, subplot=subplot, title=title, round_data=True)
return model
def train_random_forest(path_to_data=None, path_to_save_model=None, load_model=False, path_to_load=None, binary=True, subplot=111, title=""):
"""
Trains a random forest classifier on the dataset.
If no path_to_data is used, it will assume the default data directory.
If no path_to_save_model is provided, it will save to the local directory.
If load_model is True, it will load the model from the given location and resume training.
If binary is True, the model will be trained to simply detect whether, given three Seasons' worth of messages, there
will be a betrayal between these users in this order phase.
"""
print("Training the random forest...")
if load_model:
clf = load_model_from_path(path_to_load)
compute_confusion_matrix(clf, upsample=False, subplot=subplot, title=title, path_to_data=path_to_data, binary=binary)
else:
clf = RandomForestClassifier(class_weight='balanced')
clf = train_model(clf, cross_validate=True, conf_matrix=True, save_model_at_path=path_to_save_model, subplot=subplot, title=title)
return clf
def train_svm(path_to_data=None, path_to_save_model=None, load_model=False, path_to_load=None, binary=True, subplot=111, title=""):
"""
Trains an SVM classifier on the dataset.
If no path_to_data is used, it will assume the default data directory.
If no path_to_save_model is provided, it will save to the local directory.
If load_model is True, it will load the model from the given location and resume training.
If binary is True, the model will be trained to simply detect whether, given three Seasons' worth of messages, there
will be a betrayal between these users in this order phase.
"""
print("Training the SVM with nonlinear kernel (RBF)...")
if load_model:
clf = load_model_from_path(path_to_load)
compute_confusion_matrix(clf, upsample=False, subplot=subplot, title=title, path_to_data=path_to_data, binary=binary)
else:
clf = svm.SVC(class_weight='balanced')
clf = train_model(clf, cross_validate=True, conf_matrix=True, save_model_at_path=path_to_save_model, subplot=subplot, title=title)
return clf
def train_tree(path_to_data=None, path_to_save_model=None, load_model=False, path_to_load=None, binary=True, subplot=111, title=""):
"""
Trains a decision tree classifier on the dataset.
If no path_to_data is used, it will assume the default data directory.
If no path_to_save_model is provided, it will save to the local directory.
If load_model is True, it will load the model from the given location and resume training.
If binary is True, the model will be trained to simply detect whether, given three Seasons' worth of messages, there
will be a betrayal between these users in this order phase.
"""
print("Training the decision tree model...")
if load_model:
clf = load_model_from_path(path_to_load)
compute_confusion_matrix(clf, upsample=False, subplot=subplot, title=title, path_to_data=path_to_data, binary=binary)
else:
clf = tree.DecisionTreeClassifier(class_weight='balanced')
clf = train_model(clf, cross_validate=True, conf_matrix=True, save_model_at_path=path_to_save_model, subplot=subplot, title=title)
return clf
def train_model(clf, cross_validate=False, conf_matrix=False, path_to_data=None, binary=True, save_model_at_path=None, subplot=111, title="Confusion Matrix", replicate=False):
"""
Trains the given model.
    If conf_matrix is True, a confusion matrix subplot will be added to plt.
If path_to_data is specified, it will get the data from that location, otherwise it will get it from the default location.
"""
X_train, y_train = _get_xy(path_to_data, binary, replicate=replicate)
X_test, y_test = X_validation_set, Y_validation_set
clf = clf.fit(X_train, y_train)
if cross_validate:
scores = cross_val_score(clf, X_train, y_train, cv=5, n_jobs=-1)
print(" |-> Scores:", scores)
    if conf_matrix:
compute_confusion_matrix(clf, upsample=False, subplot=subplot, title=title, path_to_data=path_to_data, binary=binary)
#compute_roc_curve(clf, X_train, y_train, subplot=subplot, title=title)
if save_model_at_path:
joblib.dump(clf, save_model_at_path)
return clf
def compute_roc_curve(clf, X, y, subplot=111, title="ROC"):
"""
Computes and plots an ROC curve for the given classifier.
"""
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=6)
classifier = clf
classifier.probability=True
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
colors = itertools.cycle(['cyan', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange'])
lw = 2
i = 0
for (train, test), color in zip(cv.split(X, y), colors):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += scipy.interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.subplot(subplot)
plt.plot(fpr, tpr, lw=lw, color=color, label='Fold %d (area = %0.2f)' % (i, roc_auc))
i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=lw, color='k', label='Luck')
mean_tpr /= cv.get_n_splits(X, y)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, color='g', linestyle='--', label='Mean (area = %0.2f)' % mean_auc, lw=lw)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title(title)
plt.legend(loc="lower right")
def compute_confusion_matrix(clf, upsample=True, subplot=111, title="Confusion Matrix", path_to_data=None, binary=True, round_data=False):
"""
Computes and plots a confusion matrix.
@param upsample is deprecated - instead, just change the upscale value in data.py
"""
X_test, y_test = X_validation_set, Y_validation_set
y_pred = clf.predict(X_test)
if round_data:
y_pred = [round(y[0]) for y in y_pred] # In case predicted value is from a model that does not output a binary value
cnf_matrix = confusion_matrix(y_test, y_pred)
plot_confusion_matrix(cnf_matrix, classes=["No Betrayal", "Betrayal"], subplot=subplot, title=title)
print("Number of samples in validation set:", len(y_test))
print("Number of betrayals in validation set:", sum(y_test))
prfs = precision_recall_fscore_support(y_test, y_pred, average='micro')
print("Precision, Recall, FScore, Support | Micro", prfs)
prfs = precision_recall_fscore_support(y_test, y_pred, average='macro')
print("Precision, Recall, FScore, Support | Macro", prfs)
prfs = precision_recall_fscore_support(y_test, y_pred, average='weighted')
print("Precision, Recall, FScore, Support | Weighted", prfs)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)
def load_model_from_path(path):
"""
Returns a clf from the given path.
"""
return joblib.load(path)
def pca_display(Xs, Ys, dimensions=3):
"""
"""
assert dimensions == 2 or dimensions == 3, "Only 2D or 3D views are supported for pca_display"
pca = decomposition.PCA(n_components=dimensions)
print(Xs.shape)
pca.fit(Xs)
print("Here is how much variance is accounted for after dimension reduction:")
print(pca.explained_variance_ratio_)
X = pca.transform(Xs)
fig = plt.figure(1, figsize=(4, 13))
plt.clf()
# Invert the Y array: [0 1 0 0 1 0] -> [1 0 1 1 0 1]
print("Number of betrayals:", len([i for i in Ys if i == 1]))
print("Number of non betrayals:", len([i for i in Ys if i == 0]))
y = Ys
    y = np.choose(y, [1, 0]).astype(float)
if dimensions == 3:
ax = Axes3D(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134)
plt.cla()
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.brg)
else:
plt.cla()
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.brg)
plt.show()
class Ensemble:
def __init__(self, models, names):
self.models = models
self.names = names
def predict(self, Xs):
ys = []
for X in Xs:
Y = []
for name, model in zip(self.names, self.models):
if name == "MLP":
Y.append(np.array([model.predict(X.reshape(1, 30)).tolist()[0]]))
else:
Y.append(model.predict(X.reshape(1, -1)).tolist()[0])
ys.append(Y)
weighted_ys = []
        precision = 0.87 + 0.8 + 0.85 + 0.85 + 0.83
recall = 0.89 + 0.78 + 0.63 + 0.71 + 0.69
for Y in ys:
weighted_Y = []
for name, y in zip(self.names, Y):
to_append = 0
if name == "MLP" and y:
to_append = 1 * 0.89 / recall
elif name == "MLP" and not y:
to_append = -1 * 0.87 / precision
elif name == "KNN" and y:
to_append = 1 * 0.78 / recall
elif name == "KNN" and not y:
to_append = -1 * 0.8 / precision
elif name == "Tree" and y:
to_append = 1 * 0.63 / recall
elif name == "Tree" and not y:
to_append = -1 * 0.85 / precision
elif name == "Forest" and y:
to_append = 1 * 0.71 / recall
elif name == "Forest" and not y:
to_append = -1 * 0.85 / precision
elif name == "SVM" and y:
to_append = 1 * 0.69 / recall
elif name == "SVM" and not y:
                    to_append = -1 * 0.83 / precision
                else:
                    assert False, "Model: " + name + " not accounted for when y is " + str(bool(y))
weighted_Y.append(to_append)
prediction = np.array([1]) if sum(weighted_Y) > 0.51 else np.array([0])
weighted_ys.append(prediction)
return weighted_ys
if __name__ == "__main__":
Xs, Ys = _get_xy()
ones = [y for y in Ys if y == 1]
zeros = [y for y in Ys if y == 0]
assert(len(ones) + len(zeros) == len(Ys))
print("Betrayals:", len(ones))
print("Non betrayals:", len(zeros))
pca_display(Xs, Ys, dimensions=2)
#train_rnn(path_to_save_model="rnn.hdf5", subplot=236, title="RNN")
#mlp = train_mlp(path_to_save_model="mlp.hdf5", subplot=231, title="MLP")
#knn = train_knn(path_to_save_model="knn.model", subplot=232, title="KNN")
#tree =train_tree(path_to_save_model="tree.model", subplot=233, title="Tree")
#forest = train_random_forest(path_to_save_model="forest.model", subplot=234, title="Forest")
#svm = train_svm(path_to_save_model="svm.model", subplot=235, title="SVM")
#train_logregr(path_to_save_model="logregr.model", subplot=236, title="Log Reg")
mlp = train_mlp(load_model=True, path_to_load="models/mlp.hdf5", subplot=231, title="MLP")
knn = train_knn(load_model=True, path_to_load="models/knn.model", subplot=232, title="KNN")
tree = train_tree(load_model=True, path_to_load="models/tree.model", subplot=233, title="Tree")
forest = train_random_forest(load_model=True, path_to_load="models/forest.model", subplot=234, title="Forest")
svm = train_svm(load_model=True, path_to_load="models/svm.model", subplot=235, title="SVM")
#rnn = train_rnn(load_model=True, path_to_load="models/rnn.hdf5", subplot=236, title="RNN")
#logregr = train_logregr(load_model=True, path_to_load="models/logregr.model", subplot=236, title="Log Reg", replicate=True)
ensemble = Ensemble([mlp, knn, tree, forest, svm], ["MLP", "KNN", "Tree", "Forest", "SVM"])
print("Computing the ensemble...")
compute_confusion_matrix(ensemble, upsample=False, subplot=236, title="Ensemble")
plt.show()
|
mit
|
mindw/shapely
|
docs/code/simplify.py
|
6
|
1221
|
from matplotlib import pyplot
from shapely.geometry import MultiPoint, Point
from descartes.patch import PolygonPatch
from figures import SIZE, BLUE, GRAY
fig = pyplot.figure(1, figsize=SIZE, dpi=90)
p = Point(1, 1).buffer(1.5)
# 1
ax = fig.add_subplot(121)
q = p.simplify(0.2)
patch1a = PolygonPatch(p, facecolor=GRAY, edgecolor=GRAY, alpha=0.5, zorder=1)
ax.add_patch(patch1a)
patch1b = PolygonPatch(q, facecolor=BLUE, edgecolor=BLUE, alpha=0.5, zorder=2)
ax.add_patch(patch1b)
ax.set_title('a) tolerance 0.2')
xrange = [-1, 3]
yrange = [-1, 3]
ax.set_xlim(*xrange)
ax.set_xticks(range(*xrange) + [xrange[-1]])
ax.set_ylim(*yrange)
ax.set_yticks(range(*yrange) + [yrange[-1]])
ax.set_aspect(1)
#2
ax = fig.add_subplot(122)
r = p.simplify(0.5)
patch2a = PolygonPatch(p, facecolor=GRAY, edgecolor=GRAY, alpha=0.5, zorder=1)
ax.add_patch(patch2a)
patch2b = PolygonPatch(r, facecolor=BLUE, edgecolor=BLUE, alpha=0.5, zorder=2)
ax.add_patch(patch2b)
ax.set_title('b) tolerance 0.5')
xrange = [-1, 3]
yrange = [-1, 3]
ax.set_xlim(*xrange)
ax.set_xticks(range(*xrange) + [xrange[-1]])
ax.set_ylim(*yrange)
ax.set_yticks(range(*yrange) + [yrange[-1]])
ax.set_aspect(1)
pyplot.show()
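# A small follow-up sketch (not part of the original docs example): comparing
# exterior vertex counts before and after simplification makes the effect of
# the tolerance explicit.  It reuses p, q and r defined above.
print("original: %d vertices" % len(p.exterior.coords))
print("tolerance 0.2: %d vertices" % len(q.exterior.coords))
print("tolerance 0.5: %d vertices" % len(r.exterior.coords))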
|
bsd-3-clause
|
chatcannon/scipy
|
scipy/optimize/_lsq/least_squares.py
|
3
|
36471
|
"""Generic interface for least-square minimization."""
from warnings import warn
import numpy as np
from numpy.linalg import norm
from scipy.sparse import issparse, csr_matrix
from scipy.sparse.linalg import LinearOperator
from scipy.optimize import _minpack, OptimizeResult
from scipy.optimize._numdiff import approx_derivative, group_columns
from scipy._lib.six import string_types
from .trf import trf
from .dogbox import dogbox
from .common import EPS, in_bounds, make_strictly_feasible
TERMINATION_MESSAGES = {
-1: "Improper input parameters status returned from `leastsq`",
0: "The maximum number of function evaluations is exceeded.",
1: "`gtol` termination condition is satisfied.",
2: "`ftol` termination condition is satisfied.",
3: "`xtol` termination condition is satisfied.",
4: "Both `ftol` and `xtol` termination conditions are satisfied."
}
FROM_MINPACK_TO_COMMON = {
0: -1, # Improper input parameters from MINPACK.
1: 2,
2: 3,
3: 4,
4: 1,
5: 0
# There are 6, 7, 8 for too small tolerance parameters,
# but we guard against it by checking ftol, xtol, gtol beforehand.
}
def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
n = x0.size
if diff_step is None:
epsfcn = EPS
else:
epsfcn = diff_step**2
    # Compute MINPACK's `diag`, which is the inverse of our `x_scale`;
# ``x_scale='jac'`` corresponds to ``diag=None``.
if isinstance(x_scale, string_types) and x_scale == 'jac':
diag = None
else:
diag = 1 / x_scale
full_output = True
col_deriv = False
factor = 100.0
if jac is None:
if max_nfev is None:
# n squared to account for Jacobian evaluations.
max_nfev = 100 * n * (n + 1)
x, info, status = _minpack._lmdif(
fun, x0, (), full_output, ftol, xtol, gtol,
max_nfev, epsfcn, factor, diag)
else:
if max_nfev is None:
max_nfev = 100 * n
x, info, status = _minpack._lmder(
fun, jac, x0, (), full_output, col_deriv,
ftol, xtol, gtol, max_nfev, factor, diag)
f = info['fvec']
if callable(jac):
J = jac(x)
else:
J = np.atleast_2d(approx_derivative(fun, x))
cost = 0.5 * np.dot(f, f)
g = J.T.dot(f)
g_norm = norm(g, ord=np.inf)
nfev = info['nfev']
njev = info.get('njev', None)
status = FROM_MINPACK_TO_COMMON[status]
active_mask = np.zeros_like(x0, dtype=int)
return OptimizeResult(
x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
active_mask=active_mask, nfev=nfev, njev=njev, status=status)
def prepare_bounds(bounds, n):
lb, ub = [np.asarray(b, dtype=float) for b in bounds]
if lb.ndim == 0:
lb = np.resize(lb, n)
if ub.ndim == 0:
ub = np.resize(ub, n)
return lb, ub
def check_tolerance(ftol, xtol, gtol):
message = "{} is too low, setting to machine epsilon {}."
if ftol < EPS:
warn(message.format("`ftol`", EPS))
ftol = EPS
if xtol < EPS:
warn(message.format("`xtol`", EPS))
xtol = EPS
if gtol < EPS:
warn(message.format("`gtol`", EPS))
gtol = EPS
return ftol, xtol, gtol
def check_x_scale(x_scale, x0):
if isinstance(x_scale, string_types) and x_scale == 'jac':
return x_scale
try:
x_scale = np.asarray(x_scale, dtype=float)
valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0)
except (ValueError, TypeError):
valid = False
if not valid:
raise ValueError("`x_scale` must be 'jac' or array_like with "
"positive numbers.")
if x_scale.ndim == 0:
x_scale = np.resize(x_scale, x0.shape)
if x_scale.shape != x0.shape:
raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")
return x_scale
def check_jac_sparsity(jac_sparsity, m, n):
if jac_sparsity is None:
return None
if not issparse(jac_sparsity):
jac_sparsity = np.atleast_2d(jac_sparsity)
if jac_sparsity.shape != (m, n):
raise ValueError("`jac_sparsity` has wrong shape.")
return jac_sparsity, group_columns(jac_sparsity)
# Loss functions.
def huber(z, rho, cost_only):
mask = z <= 1
rho[0, mask] = z[mask]
rho[0, ~mask] = 2 * z[~mask]**0.5 - 1
if cost_only:
return
rho[1, mask] = 1
rho[1, ~mask] = z[~mask]**-0.5
rho[2, mask] = 0
rho[2, ~mask] = -0.5 * z[~mask]**-1.5
def soft_l1(z, rho, cost_only):
t = 1 + z
rho[0] = 2 * (t**0.5 - 1)
if cost_only:
return
rho[1] = t**-0.5
rho[2] = -0.5 * t**-1.5
def cauchy(z, rho, cost_only):
rho[0] = np.log1p(z)
if cost_only:
return
t = 1 + z
rho[1] = 1 / t
rho[2] = -1 / t**2
def arctan(z, rho, cost_only):
rho[0] = np.arctan(z)
if cost_only:
return
t = 1 + z**2
rho[1] = 1 / t
rho[2] = -2 * z / t**2
IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
cauchy=cauchy, arctan=arctan)
def construct_loss_function(m, loss, f_scale):
if loss == 'linear':
return None
if not callable(loss):
loss = IMPLEMENTED_LOSSES[loss]
rho = np.empty((3, m))
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
loss(z, rho, cost_only=cost_only)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
else:
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
rho = loss(z)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
return loss_function
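# Illustrative sketch (added for exposition; not part of the SciPy source): a
# user-supplied `loss` callable must map z = f**2 to an array of shape (3, m)
# holding rho(z), rho'(z) and rho''(z).  The example below rewrites the
# built-in 'soft_l1' loss in that form; the name is only for demonstration.
def _example_soft_l1_callable(z):
    rho = np.empty((3, z.size))
    t = 1 + z
    rho[0] = 2 * (t**0.5 - 1)   # rho(z)
    rho[1] = t**-0.5            # first derivative
    rho[2] = -0.5 * t**-1.5     # second derivative
    return rho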
def least_squares(
fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
"""Solve a nonlinear least-squares problem with bounds on the variables.
Given the residuals f(x) (an m-dimensional function of n variables) and
the loss function rho(s) (a scalar function), `least_squares` finds a
local minimum of the cost function F(x)::
minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
subject to lb <= x <= ub
The purpose of the loss function rho(s) is to reduce the influence of
outliers on the solution.
Parameters
----------
fun : callable
Function which computes the vector of residuals, with the signature
``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
respect to its first argument. The argument ``x`` passed to this
function is an ndarray of shape (n,) (never a scalar, even for n=1).
It must return a 1-d array_like of shape (m,) or a scalar.
x0 : array_like with shape (n,) or float
Initial guess on independent variables. If float, it will be treated
as a 1-d array with one element.
jac : {'2-point', '3-point', 'cs', callable}, optional
Method of computing the Jacobian matrix (an m-by-n matrix, where
element (i, j) is the partial derivative of f[i] with respect to
x[j]). The keywords select a finite difference scheme for numerical
estimation. The scheme '3-point' is more accurate, but requires
        twice as many operations compared to '2-point' (default). The
scheme 'cs' uses complex steps, and while potentially the most
accurate, it is applicable only when `fun` correctly handles
complex inputs and can be analytically continued to the complex
plane. Method 'lm' always uses the '2-point' scheme. If callable,
it is used as ``jac(x, *args, **kwargs)`` and should return a
good approximation (or the exact value) for the Jacobian as an
array_like (np.atleast_2d is applied), a sparse matrix or a
`scipy.sparse.linalg.LinearOperator`.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each array must match the size of `x0` or be a scalar, in the latter
case a bound will be the same for all variables. Use ``np.inf`` with
an appropriate sign to disable bounds on all or some variables.
method : {'trf', 'dogbox', 'lm'}, optional
Algorithm to perform minimization.
* 'trf' : Trust Region Reflective algorithm, particularly suitable
for large sparse problems with bounds. Generally robust method.
* 'dogbox' : dogleg algorithm with rectangular trust regions,
typical use case is small problems with bounds. Not recommended
for problems with rank-deficient Jacobian.
* 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
Doesn't handle bounds and sparse Jacobians. Usually the most
efficient method for small unconstrained problems.
Default is 'trf'. See Notes for more information.
ftol : float, optional
Tolerance for termination by the change of the cost function. Default
is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
and there was an adequate agreement between a local quadratic model and
the true model in the last step.
xtol : float, optional
Tolerance for termination by the change of the independent variables.
Default is 1e-8. The exact condition depends on the `method` used:
* For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``
* For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
a trust-region radius and ``xs`` is the value of ``x``
scaled according to `x_scale` parameter (see below).
gtol : float, optional
Tolerance for termination by the norm of the gradient. Default is 1e-8.
        The exact condition depends on the `method` used:
* For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
``g_scaled`` is the value of the gradient scaled to account for
the presence of the bounds [STIR]_.
* For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
``g_free`` is the gradient with respect to the variables which
are not in the optimal state on the boundary.
* For 'lm' : the maximum absolute value of the cosine of angles
between columns of the Jacobian and the residual vector is less
than `gtol`, or the residual vector is zero.
x_scale : array_like or 'jac', optional
Characteristic scale of each variable. Setting `x_scale` is equivalent
to reformulating the problem in scaled variables ``xs = x / x_scale``.
An alternative view is that the size of a trust region along j-th
dimension is proportional to ``x_scale[j]``. Improved convergence may
be achieved by setting `x_scale` such that a step of a given size
along any of the scaled variables has a similar effect on the cost
function. If set to 'jac', the scale is iteratively updated using the
inverse norms of the columns of the Jacobian matrix (as described in
[JJMore]_).
loss : str or callable, optional
Determines the loss function. The following keyword values are allowed:
* 'linear' (default) : ``rho(z) = z``. Gives a standard
least-squares problem.
* 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
approximation of l1 (absolute value) loss. Usually a good
choice for robust least squares.
* 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
similarly to 'soft_l1'.
* 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers
          influence, but may cause difficulties in the optimization process.
* 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
a single residual, has properties similar to 'cauchy'.
If callable, it must take a 1-d ndarray ``z=f**2`` and return an
array_like with shape (3, m) where row 0 contains function values,
row 1 contains first derivatives and row 2 contains second
derivatives. Method 'lm' supports only 'linear' loss.
f_scale : float, optional
Value of soft margin between inlier and outlier residuals, default
is 1.0. The loss function is evaluated as follows
``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
and ``rho`` is determined by `loss` parameter. This parameter has
no effect with ``loss='linear'``, but for other `loss` values it is
of crucial importance.
max_nfev : None or int, optional
Maximum number of function evaluations before the termination.
If None (default), the value is chosen automatically:
* For 'trf' and 'dogbox' : 100 * n.
* For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
otherwise (because 'lm' counts function calls in Jacobian
estimation).
diff_step : None or array_like, optional
Determines the relative step size for the finite difference
approximation of the Jacobian. The actual step is computed as
``x * diff_step``. If None (default), then `diff_step` is taken to be
a conventional "optimal" power of machine epsilon for the finite
difference scheme used [NR]_.
tr_solver : {None, 'exact', 'lsmr'}, optional
Method for solving trust-region subproblems, relevant only for 'trf'
and 'dogbox' methods.
* 'exact' is suitable for not very large problems with dense
Jacobian matrices. The computational complexity per iteration is
comparable to a singular value decomposition of the Jacobian
matrix.
* 'lsmr' is suitable for problems with sparse and large Jacobian
matrices. It uses the iterative procedure
`scipy.sparse.linalg.lsmr` for finding a solution of a linear
least-squares problem and only requires matrix-vector product
evaluations.
If None (default) the solver is chosen based on the type of Jacobian
returned on the first iteration.
tr_options : dict, optional
Keyword options passed to trust-region solver.
* ``tr_solver='exact'``: `tr_options` are ignored.
* ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
Additionally ``method='trf'`` supports 'regularize' option
(bool, default is True) which adds a regularization term to the
normal equation, which improves convergence if the Jacobian is
rank-deficient [Byrd]_ (eq. 3.4).
jac_sparsity : {None, array_like, sparse matrix}, optional
Defines the sparsity structure of the Jacobian matrix for finite
difference estimation, its shape must be (m, n). If the Jacobian has
only few non-zero elements in *each* row, providing the sparsity
structure will greatly speed up the computations [Curtis]_. A zero
entry means that a corresponding element in the Jacobian is identically
zero. If provided, forces the use of 'lsmr' trust-region solver.
If None (default) then dense differencing will be used. Has no effect
for 'lm' method.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations (not supported by 'lm'
method).
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same for
`jac`.
Returns
-------
`OptimizeResult` with the following fields defined:
x : ndarray, shape (n,)
Solution found.
cost : float
Value of the cost function at the solution.
fun : ndarray, shape (m,)
Vector of residuals at the solution.
jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
Modified Jacobian matrix at the solution, in the sense that J^T J
is a Gauss-Newton approximation of the Hessian of the cost function.
The type is the same as the one used by the algorithm.
grad : ndarray, shape (m,)
Gradient of the cost function at the solution.
optimality : float
First-order optimality measure. In unconstrained problems, it is always
the uniform norm of the gradient. In constrained problems, it is the
quantity which was compared with `gtol` during iterations.
active_mask : ndarray of int, shape (n,)
Each component shows whether a corresponding constraint is active
(that is, whether a variable is at the bound):
* 0 : a constraint is not active.
* -1 : a lower bound is active.
* 1 : an upper bound is active.
Might be somewhat arbitrary for 'trf' method as it generates a sequence
of strictly feasible iterates and `active_mask` is determined within a
tolerance threshold.
nfev : int
Number of function evaluations done. Methods 'trf' and 'dogbox' do not
count function calls for numerical Jacobian approximation, as opposed
to 'lm' method.
njev : int or None
Number of Jacobian evaluations done. If numerical Jacobian
approximation is used in 'lm' method, it is set to None.
status : int
The reason for algorithm termination:
* -1 : improper input parameters status returned from MINPACK.
* 0 : the maximum number of function evaluations is exceeded.
* 1 : `gtol` termination condition is satisfied.
* 2 : `ftol` termination condition is satisfied.
* 3 : `xtol` termination condition is satisfied.
* 4 : Both `ftol` and `xtol` termination conditions are satisfied.
message : str
Verbal description of the termination reason.
success : bool
True if one of the convergence criteria is satisfied (`status` > 0).
See Also
--------
leastsq : A legacy wrapper for the MINPACK implementation of the
        Levenberg-Marquardt algorithm.
curve_fit : Least-squares minimization applied to a curve fitting problem.
Notes
-----
Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
algorithms implemented in MINPACK (lmder, lmdif). It runs the
Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
The implementation is based on paper [JJMore]_, it is very robust and
efficient with a lot of smart tricks. It should be your first choice
for unconstrained problems. Note that it doesn't support bounds. Also
it doesn't work when m < n.
Method 'trf' (Trust Region Reflective) is motivated by the process of
solving a system of equations, which constitute the first-order optimality
condition for a bound-constrained minimization problem as formulated in
[STIR]_. The algorithm iteratively solves trust-region subproblems
augmented by a special diagonal quadratic term and with trust-region shape
determined by the distance from the bounds and the direction of the
    gradient. These enhancements help to avoid making steps directly into bounds
and efficiently explore the whole space of variables. To further improve
convergence, the algorithm considers search directions reflected from the
bounds. To obey theoretical requirements, the algorithm keeps iterates
strictly feasible. With dense Jacobians trust-region subproblems are
solved by an exact method very similar to the one described in [JJMore]_
(and implemented in MINPACK). The difference from the MINPACK
implementation is that a singular value decomposition of a Jacobian
matrix is done once per iteration, instead of a QR decomposition and series
of Givens rotation eliminations. For large sparse Jacobians a 2-d subspace
approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.
The subspace is spanned by a scaled gradient and an approximate
Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
constraints are imposed the algorithm is very similar to MINPACK and has
    generally comparable performance. The algorithm works quite robustly in
unbounded and bounded problems, thus it is chosen as a default algorithm.
Method 'dogbox' operates in a trust-region framework, but considers
rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
The intersection of a current trust region and initial bounds is again
rectangular, so on each iteration a quadratic minimization problem subject
to bound constraints is solved approximately by Powell's dogleg method
[NumOpt]_. The required Gauss-Newton step can be computed exactly for
dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
sparse Jacobians. The algorithm is likely to exhibit slow convergence when
the rank of Jacobian is less than the number of variables. The algorithm
often outperforms 'trf' in bounded problems with a small number of
variables.
Robust loss functions are implemented as described in [BA]_. The idea
is to modify a residual vector and a Jacobian matrix on each iteration
such that computed gradient and Gauss-Newton Hessian approximation match
the true gradient and Hessian approximation of the cost function. Then
the algorithm proceeds in a normal way, i.e. robust loss functions are
implemented as a simple wrapper over standard least-squares algorithms.
.. versionadded:: 0.17.0
References
----------
.. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
.. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific
Computing. 3rd edition", Sec. 5.7.
.. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
solution of the trust region problem by minimization over
two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
1988.
.. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of
Mathematics and its Applications, 13, pp. 117-120, 1974.
.. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
Dogleg Approach for Unconstrained and Bound Constrained
Nonlinear Optimization", WSEAS International Conference on
Applied Mathematics, Corfu, Greece, 2004.
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
2nd edition", Chapter 4.
.. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis",
Proceedings of the International Workshop on Vision Algorithms:
Theory and Practice, pp. 298-372, 1999.
Examples
--------
In this example we find a minimum of the Rosenbrock function without bounds
    on independent variables.
>>> def fun_rosenbrock(x):
... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
Notice that we only provide the vector of the residuals. The algorithm
constructs the cost function as a sum of squares of the residuals, which
gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.
>>> from scipy.optimize import least_squares
>>> x0_rosenbrock = np.array([2, 2])
>>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
>>> res_1.x
array([ 1., 1.])
>>> res_1.cost
9.8669242910846867e-30
>>> res_1.optimality
8.8928864934219529e-14
We now constrain the variables, in such a way that the previous solution
becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and
``x[0]`` left unconstrained. To this end, we specify the `bounds` parameter
to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.
We also provide the analytic Jacobian:
>>> def jac_rosenbrock(x):
... return np.array([
... [-20 * x[0], 10],
... [-1, 0]])
Putting this all together, we see that the new solution lies on the bound:
>>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
... bounds=([-np.inf, 1.5], np.inf))
>>> res_2.x
array([ 1.22437075, 1.5 ])
>>> res_2.cost
0.025213093946805685
>>> res_2.optimality
1.5885401433157753e-07
Now we solve a system of equations (i.e., the cost function should be zero
at a minimum) for a Broyden tridiagonal vector-valued function of 100000
variables:
>>> def fun_broyden(x):
... f = (3 - x) * x + 1
... f[1:] -= x[:-1]
... f[:-1] -= 2 * x[1:]
... return f
The corresponding Jacobian matrix is sparse. We tell the algorithm to
estimate it by finite differences and provide the sparsity structure of
Jacobian to significantly speed up this process.
>>> from scipy.sparse import lil_matrix
>>> def sparsity_broyden(n):
... sparsity = lil_matrix((n, n), dtype=int)
... i = np.arange(n)
... sparsity[i, i] = 1
... i = np.arange(1, n)
... sparsity[i, i - 1] = 1
... i = np.arange(n - 1)
... sparsity[i, i + 1] = 1
... return sparsity
...
>>> n = 100000
>>> x0_broyden = -np.ones(n)
...
>>> res_3 = least_squares(fun_broyden, x0_broyden,
... jac_sparsity=sparsity_broyden(n))
>>> res_3.cost
4.5687069299604613e-23
>>> res_3.optimality
1.1650454296851518e-11
Let's also solve a curve fitting problem using robust loss function to
take care of outliers in the data. Define the model function as
``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
observation and a, b, c are parameters to estimate.
First, define the function which generates the data with noise and
outliers, define the model parameters, and generate data:
>>> def gen_data(t, a, b, c, noise=0, n_outliers=0, random_state=0):
... y = a + b * np.exp(t * c)
...
... rnd = np.random.RandomState(random_state)
... error = noise * rnd.randn(t.size)
... outliers = rnd.randint(0, t.size, n_outliers)
... error[outliers] *= 10
...
... return y + error
...
>>> a = 0.5
>>> b = 2.0
>>> c = -1
>>> t_min = 0
>>> t_max = 10
>>> n_points = 15
...
>>> t_train = np.linspace(t_min, t_max, n_points)
>>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)
Define function for computing residuals and initial estimate of
parameters.
>>> def fun(x, t, y):
... return x[0] + x[1] * np.exp(x[2] * t) - y
...
>>> x0 = np.array([1.0, 1.0, 0.0])
Compute a standard least-squares solution:
>>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))
Now compute two solutions with two different robust loss functions. The
parameter `f_scale` is set to 0.1, meaning that inlier residuals should
not significantly exceed 0.1 (the noise level used).
>>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
... args=(t_train, y_train))
>>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
... args=(t_train, y_train))
And finally plot all the curves. We see that by selecting an appropriate
`loss` we can get estimates close to optimal even in the presence of
strong outliers. But keep in mind that generally it is recommended to try
'soft_l1' or 'huber' losses first (if at all necessary) as the other two
    options may cause difficulties in the optimization process.
>>> t_test = np.linspace(t_min, t_max, n_points * 10)
>>> y_true = gen_data(t_test, a, b, c)
>>> y_lsq = gen_data(t_test, *res_lsq.x)
>>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
>>> y_log = gen_data(t_test, *res_log.x)
...
>>> import matplotlib.pyplot as plt
>>> plt.plot(t_train, y_train, 'o')
>>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
>>> plt.plot(t_test, y_lsq, label='linear loss')
>>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
>>> plt.plot(t_test, y_log, label='cauchy loss')
>>> plt.xlabel("t")
>>> plt.ylabel("y")
>>> plt.legend()
>>> plt.show()
"""
if method not in ['trf', 'dogbox', 'lm']:
raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")
if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
"callable.")
if tr_solver not in [None, 'exact', 'lsmr']:
raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")
if loss not in IMPLEMENTED_LOSSES and not callable(loss):
raise ValueError("`loss` must be one of {0} or a callable."
.format(IMPLEMENTED_LOSSES.keys()))
if method == 'lm' and loss != 'linear':
raise ValueError("method='lm' supports only 'linear' loss function.")
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
if len(bounds) != 2:
raise ValueError("`bounds` must contain 2 elements.")
if max_nfev is not None and max_nfev <= 0:
raise ValueError("`max_nfev` must be None or positive integer.")
x0 = np.atleast_1d(x0).astype(float)
if x0.ndim > 1:
raise ValueError("`x0` must have at most 1 dimension.")
lb, ub = prepare_bounds(bounds, x0.shape[0])
if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
raise ValueError("Method 'lm' doesn't support bounds.")
if lb.shape != x0.shape or ub.shape != x0.shape:
raise ValueError("Inconsistent shapes between bounds and `x0`.")
if np.any(lb >= ub):
raise ValueError("Each lower bound mush be strictly less than each "
"upper bound.")
if not in_bounds(x0, lb, ub):
raise ValueError("`x0` is infeasible.")
x_scale = check_x_scale(x_scale, x0)
ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol)
def fun_wrapped(x):
return np.atleast_1d(fun(x, *args, **kwargs))
if method == 'trf':
x0 = make_strictly_feasible(x0, lb, ub)
f0 = fun_wrapped(x0)
if f0.ndim != 1:
raise ValueError("`fun` must return at most 1-d array_like.")
if not np.all(np.isfinite(f0)):
raise ValueError("Residuals are not finite in the initial point.")
n = x0.size
m = f0.size
if method == 'lm' and m < n:
raise ValueError("Method 'lm' doesn't work when the number of "
"residuals is less than the number of variables.")
loss_function = construct_loss_function(m, loss, f_scale)
if callable(loss):
rho = loss_function(f0)
if rho.shape != (3, m):
raise ValueError("The return value of `loss` callable has wrong "
"shape.")
initial_cost = 0.5 * np.sum(rho[0])
elif loss_function is not None:
initial_cost = loss_function(f0, cost_only=True)
else:
initial_cost = 0.5 * np.dot(f0, f0)
if callable(jac):
J0 = jac(x0, *args, **kwargs)
if issparse(J0):
J0 = csr_matrix(J0)
def jac_wrapped(x, _=None):
return csr_matrix(jac(x, *args, **kwargs))
elif isinstance(J0, LinearOperator):
def jac_wrapped(x, _=None):
return jac(x, *args, **kwargs)
else:
J0 = np.atleast_2d(J0)
def jac_wrapped(x, _=None):
return np.atleast_2d(jac(x, *args, **kwargs))
else: # Estimate Jacobian by finite differences.
if method == 'lm':
if jac_sparsity is not None:
raise ValueError("method='lm' does not support "
"`jac_sparsity`.")
if jac != '2-point':
warn("jac='{0}' works equivalently to '2-point' "
"for method='lm'.".format(jac))
J0 = jac_wrapped = None
else:
if jac_sparsity is not None and tr_solver == 'exact':
raise ValueError("tr_solver='exact' is incompatible "
"with `jac_sparsity`.")
jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)
def jac_wrapped(x, f):
J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
f0=f, bounds=bounds, args=args,
kwargs=kwargs, sparsity=jac_sparsity)
if J.ndim != 2: # J is guaranteed not sparse.
J = np.atleast_2d(J)
return J
J0 = jac_wrapped(x0, f0)
if J0 is not None:
if J0.shape != (m, n):
raise ValueError(
"The return value of `jac` has wrong shape: expected {0}, "
"actual {1}.".format((m, n), J0.shape))
if not isinstance(J0, np.ndarray):
if method == 'lm':
raise ValueError("method='lm' works only with dense "
"Jacobian matrices.")
if tr_solver == 'exact':
raise ValueError(
"tr_solver='exact' works only with dense "
"Jacobian matrices.")
jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
if isinstance(J0, LinearOperator) and jac_scale:
raise ValueError("x_scale='jac' can't be used when `jac` "
"returns LinearOperator.")
if tr_solver is None:
if isinstance(J0, np.ndarray):
tr_solver = 'exact'
else:
tr_solver = 'lsmr'
if method == 'lm':
result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
max_nfev, x_scale, diff_step)
elif method == 'trf':
result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
gtol, max_nfev, x_scale, loss_function, tr_solver,
tr_options.copy(), verbose)
elif method == 'dogbox':
if tr_solver == 'lsmr' and 'regularize' in tr_options:
warn("The keyword 'regularize' in `tr_options` is not relevant "
"for 'dogbox' method.")
tr_options = tr_options.copy()
del tr_options['regularize']
result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
xtol, gtol, max_nfev, x_scale, loss_function,
tr_solver, tr_options, verbose)
result.message = TERMINATION_MESSAGES[result.status]
result.success = result.status > 0
if verbose >= 1:
print(result.message)
print("Function evaluations {0}, initial cost {1:.4e}, final cost "
"{2:.4e}, first-order optimality {3:.2e}."
.format(result.nfev, initial_cost, result.cost,
result.optimality))
return result
|
bsd-3-clause
|
hrjn/scikit-learn
|
examples/feature_selection/plot_rfe_with_cross_validation.py
|
161
|
1380
|
"""
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
|
bsd-3-clause
|
linbojin/dv4sa
|
sentiment-analysis-bow.py
|
1
|
1830
|
#!usr/bin/env python
# coding = utf-8
"""
Sentiment Analysis based on bag_of_words for unsplit datasets, evaluated with CV=10
sklearn tf-idf
custom bag_of_words
cred-tfidf
Author: Linbo
Date: 15.03.2015
"""
import doc2vec
import numpy as np
from sklearn.svm import LinearSVC
print "Sentiment Analysis based on Bag of words"
############ Load dataset ##################
print "Loading dataset ... "
path = './datasets/'
dataset = 'rt-polarity'
dataset = 'mpqa'
data_folder = [path+dataset+".pos", path+dataset+".neg"]
# dataset = 'subj'
# data_folder = [path+dataset+".objective", path+dataset+".subjective"]
d2v_model = doc2vec.load_docs(data_folder, clean_string=True)
print "Done!"
#############################################
#######################################
print "Run algorithms, CV=10"
train_results=[]
test_results = []
c=1
r = range(0, 10)
for i in r:
print "cv = %d" % i
d2v_model.train_test_split(i)
d2v_model.count_data()
d2v_model.get_bag_of_words_sklearn() # 77.1 c=1 tf-idf weight scheme in sklearn
# d2v_model.get_bag_of_words(cre_adjust=False) # 77.2 c=1 custom tf-idf
# d2v_model.get_bag_of_words(cre_adjust=True) # 77.5 c=1 custom cre tf-idf weight
text_clf = LinearSVC(C=c)
_ = text_clf.fit(d2v_model.train_doc_vecs, d2v_model.train_labels)
perf = text_clf.score(d2v_model.test_doc_vecs, d2v_model.test_labels)
perf2 = text_clf.score(d2v_model.train_doc_vecs, d2v_model.train_labels)
print " Train accuracy:" + str(perf2)
print " Test accuracy:" + str(perf)
print
train_results.append(perf2)
test_results.append(perf)
print "****** (c=%f) ******" % c
print " Train Average accuracy: %f" % np.mean(train_results)
print " Test Average accuracy: %f \n" % np.mean(test_results)
|
gpl-2.0
|
tomvand/paparazzi-gazebo
|
sw/misc/attitude_reference/pat/utils.py
|
42
|
6283
|
#
# Copyright 2013-2014 Antoine Drouin ([email protected])
#
# This file is part of PAT.
#
# PAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PAT. If not, see <http://www.gnu.org/licenses/>.
#
"""
Utility functions
"""
import math
import numpy as np
import numpy.linalg as linalg
import pdb
"""
Unit conversions
"""
def rad_of_deg(d):
return d / 180. * math.pi
def sqrad_of_sqdeg(d):
return d / (180. * math.pi) ** 2
def deg_of_rad(r):
return r * 180. / math.pi
def sqdeg_of_sqrad(r):
return r * (180. / math.pi) ** 2
def rps_of_rpm(r):
return r * 2. * math.pi / 60.
def rpm_of_rps(r):
return r / 2. / math.pi * 60.
# http://en.wikipedia.org/wiki/Nautical_mile
def m_of_NM(nm):
return nm * 1852.
def NM_of_m(m):
return m / 1852.
# http://en.wikipedia.org/wiki/Knot_(speed)
def mps_of_kt(kt):
return kt * 0.514444
def kt_of_mps(mps):
return mps / 0.514444
# http://en.wikipedia.org/wiki/Foot_(unit)
def m_of_ft(ft):
return ft * 0.3048
def ft_of_m(m):
return m / 0.3048
# feet per minute to/from meters per second
def ftpm_of_mps(mps):
return mps * 60. * 3.28084
def mps_of_ftpm(ftpm):
return ftpm / 60. / 3.28084
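# Quick sanity-check sketch (added for illustration, not part of the original
# module): round-tripping a conversion should return the input up to floating
# point error.
def _unit_conversion_examples():
    assert abs(kt_of_mps(mps_of_kt(10.)) - 10.) < 1e-9
    assert abs(deg_of_rad(rad_of_deg(45.)) - 45.) < 1e-9
    assert abs(m_of_ft(ft_of_m(100.)) - 100.) < 1e-9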
"""
Clipping
"""
def norm_angle_0_2pi(a):
while a > 2. * math.pi:
a -= 2. * math.pi
while a <= 0:
a += 2. * math.pi
return a
def norm_angle_mpi_pi(a):
while a > math.pi:
a -= 2. * math.pi
while a <= -math.pi:
a += 2. * math.pi
return a
#
def saturate(_v, _min, _max):
if _v < _min:
return _min
if _v > _max:
return _max
return _v
"""
Plotting
"""
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
my_title_spec = {'color': 'k', 'fontsize': 20}
def save_if(filename):
if filename: matplotlib.pyplot.savefig(filename, dpi=80)
def prepare_fig(fig=None, window_title=None, figsize=(20.48, 10.24), margins=None):
if fig is None:
fig = plt.figure(figsize=figsize)
# else:
# plt.figure(fig.number)
if margins:
left, bottom, right, top, wspace, hspace = margins
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top,
hspace=hspace, wspace=wspace)
if window_title:
fig.canvas.set_window_title(window_title)
return fig
def decorate(ax, title=None, xlab=None, ylab=None, legend=None, xlim=None, ylim=None):
ax.xaxis.grid(color='k', linestyle='-', linewidth=0.2)
ax.yaxis.grid(color='k', linestyle='-', linewidth=0.2)
if xlab:
ax.xaxis.set_label_text(xlab)
if ylab:
ax.yaxis.set_label_text(ylab)
if title:
ax.set_title(title, my_title_spec)
if legend is not None:
ax.legend(legend, loc='best')
if xlim is not None:
ax.set_xlim(xlim[0], xlim[1])
if ylim is not None:
ax.set_ylim(ylim[0], ylim[1])
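# Minimal usage sketch (added for illustration; not part of the original PAT
# source): building one figure with prepare_fig and dressing a single axis
# with decorate.  The plotted data and labels are arbitrary.
def _decorate_example():
    fig = prepare_fig(figsize=(6.4, 4.8))
    ax = fig.add_subplot(111)
    ax.plot([0., 1.], [0., 1.])
    decorate(ax, title='demo', xlab='time (s)', ylab='value', legend=['ramp'])
    return fig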
def ensure_ylim(ax, yspan):
ymin, ymax = ax.get_ylim()
if ymax - ymin < yspan:
ym = (ymin + ymax) / 2
ax.set_ylim(ym - yspan / 2, ym + yspan / 2)
def write_text(nrows, ncols, plot_number, text, colspan=1, loc=[[0.5, 9.7]], filename=None):
# ax = plt.subplot(nrows, ncols, plot_number)
gs = gridspec.GridSpec(nrows, ncols)
row, col = divmod(plot_number - 1, ncols)
ax = plt.subplot(gs[row, col:col + colspan])
plt.axis([0, 10, 0, 10])
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
for i in range(0, len(text)):
plt.text(loc[i][0], loc[i][1], text[i], ha='left', va='top')
save_if(filename)
def plot_in_grid(time, plots, ncol, figure=None, window_title="None", legend=None, filename=None,
margins=(0.04, 0.08, 0.93, 0.96, 0.20, 0.34)):
nrow = math.ceil(len(plots) / float(ncol))
figsize = (10.24 * ncol, 2.56 * nrow)
figure = prepare_fig(figure, window_title, figsize=figsize, margins=margins)
# pdb.set_trace()
for i, (title, ylab, data) in enumerate(plots):
ax = figure.add_subplot(nrow, ncol, i + 1)
ax.plot(time, data)
decorate(ax, title=title, ylab=ylab)
if legend is not None:
ax.legend(legend, loc='best')
save_if(filename)
return figure
"""
Misc
"""
def num_jacobian(X, U, P, dyn):
s_size = len(X)
i_size = len(U)
epsilonX = (0.1 * np.ones(s_size)).tolist()
dX = np.diag(epsilonX)
A = np.zeros((s_size, s_size))
for i in range(0, s_size):
dx = dX[i, :]
delta_f = dyn(X + dx / 2, 0, U, P) - dyn(X - dx / 2, 0, U, P)
delta_f = delta_f / dx[i]
# print delta_f
A[:, i] = delta_f
epsilonU = (0.1 * np.ones(i_size)).tolist()
dU = np.diag(epsilonU)
B = np.zeros((s_size, i_size))
for i in range(0, i_size):
du = dU[i, :]
delta_f = dyn(X, 0, U + du / 2, P) - dyn(X, 0, U - du / 2, P)
delta_f = delta_f / du[i]
B[:, i] = delta_f
return A, B
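# Illustrative sketch (added; not in the original PAT source): for linear
# dynamics Xdot = A.X + B.U the numerical Jacobian should recover A and B.
# The matrices and the dyn(X, t, U, P) stub below are made up for the demo.
def _num_jacobian_example():
    A_true = np.array([[0., 1.], [-2., -0.5]])
    B_true = np.array([[0.], [1.]])
    def dyn(X, t, U, P):
        return np.dot(A_true, X) + np.dot(B_true, U)
    A, B = num_jacobian(np.zeros(2), np.zeros(1), None, dyn)
    return np.allclose(A, A_true) and np.allclose(B, B_true)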
def saturate(V, Sats):
Vsat = np.array(V)
for i in range(0, len(V)):
if Vsat[i] < Sats[i, 0]:
Vsat[i] = Sats[i, 0]
elif Vsat[i] > Sats[i, 1]:
Vsat[i] = Sats[i, 1]
return Vsat
def print_lti_dynamics(A, B, txt=None, print_original_form=False, print_modal_form=False):
if txt:
print txt
if print_original_form:
print "A\n", A
print "B\n", B
w, M = np.linalg.eig(A)
print "modes \n", w
if print_modal_form:
# print "eigen vectors\n", M
# invM = np.linalg.inv(M)
# print "invM\n", invM
# Amod = np.dot(np.dot(invM, A), M)
# print "Amod\n", Amod
for i in range(len(w)):
print w[i], "->", M[:, i]
|
gpl-2.0
|
mesnilgr/fast-rcnn
|
lib/roi_data_layer/minibatch.py
|
44
|
7337
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
import cv2
from fast_rcnn.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
# Get the input image blob, formatted for caffe
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_loss_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
# all_overlaps = []
for im_i in xrange(num_images):
labels, overlaps, im_rois, bbox_targets, bbox_loss \
= _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_loss_blob = np.vstack((bbox_loss_blob, bbox_loss))
# all_overlaps = np.hstack((all_overlaps, overlaps))
# For debug visualizations
# _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps)
blobs = {'data': im_blob,
'rois': rois_blob,
'labels': labels_blob}
if cfg.TRAIN.BBOX_REG:
blobs['bbox_targets'] = bbox_targets_blob
blobs['bbox_loss_weights'] = bbox_loss_blob
return blobs
def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
rois = roidb['boxes']
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image,
replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_inds.size)
    # Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image,
replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
overlaps = overlaps[keep_inds]
rois = rois[keep_inds]
bbox_targets, bbox_loss_weights = \
_get_bbox_regression_labels(roidb['bbox_targets'][keep_inds, :],
num_classes)
return labels, overlaps, rois, bbox_targets, bbox_loss_weights
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_loss_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_loss_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_loss_weights[ind, start:end] = [1., 1., 1., 1.]
return bbox_targets, bbox_loss_weights
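# Illustrative sketch (added for exposition; not part of the original Fast
# R-CNN code): the 4-of-4*K layout described above, built by hand for one RoI
# of class 2 with K = 3 classes.  Only the four columns owned by class 2
# carry the regression target.
def _bbox_layout_example():
    num_classes = 3
    cls = 2
    target = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32)
    expanded = np.zeros(4 * num_classes, dtype=np.float32)
    expanded[4 * cls:4 * cls + 4] = target
    return expanded  # zeros everywhere except columns 8..11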
def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps):
"""Visualize a mini-batch for debugging."""
import matplotlib.pyplot as plt
for i in xrange(rois_blob.shape[0]):
rois = rois_blob[i, :]
im_ind = rois[0]
roi = rois[1:]
im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = im.astype(np.uint8)
cls = labels_blob[i]
plt.imshow(im)
print 'class: ', cls, ' overlap: ', overlaps[i]
plt.gca().add_patch(
plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
roi[3] - roi[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.show()
|
mit
|
alexandrudaia/gplearn
|
setup.py
|
2
|
2212
|
#! /usr/bin/env python
"""Genetic Programming in Python, with a scikit-learn inspired API"""
import sys
from sklearn.externals import joblib
from setuptools import setup, find_packages
import gplearn
DESCRIPTION = __doc__
VERSION = gplearn.__version__
setup_options = dict(
name='gplearn',
version=VERSION,
description=DESCRIPTION,
long_description=open("README.rst").read(),
classifiers=['Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'],
author='Trevor Stephens',
author_email='[email protected]',
url='https://github.com/trevorstephens/gplearn',
license='new BSD',
packages=find_packages(),
test_suite='nose.collector',
zip_safe=False,
package_data={
'': ['LICENSE'],
'gplearn': ['tests/*.py', 'skutils/*.py', 'skutils/tests/*.py'],
},
install_requires=['scikit-learn>=0.15.2'],
extras_require={'testing': ['nose'],
'docs': ['Sphinx']}
)
# For these actions, NumPy is not required. We want them to succeed without,
# for example when pip is used to install gplearn without NumPy present.
NO_NUMPY_ACTIONS = ('--help-commands', 'egg_info', '--version', 'clean')
if not ('--help' in sys.argv[1:]
or len(sys.argv) > 1 and sys.argv[1] in NO_NUMPY_ACTIONS):
import numpy as np
setup_options['include_dirs'] = [np.get_include()]
setup(**setup_options)
|
bsd-3-clause
|
ilyes14/scikit-learn
|
sklearn/covariance/robust_covariance.py
|
198
|
29735
|
"""
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementation of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
    location : array-like, shape (n_features,)
        Robust location estimates.
    covariance : array-like, shape (n_features, n_features)
        Robust covariance estimates.
    det : float
        Log of the determinant of the robust covariance estimate.
    support : array-like, shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.
    dist : array-like, shape (n_samples,)
        Squared Mahalanobis distances of all observations, computed with the
        robust location and covariance estimates.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
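# Illustrative sketch (added; not part of the scikit-learn source): running
# the public c_step helper on a small contaminated Gaussian sample.  The
# shapes, contamination and seed below are arbitrary.
def _c_step_example(seed=0):
    rng = check_random_state(seed)
    X = rng.randn(100, 2)
    X[:10] += 10.  # a handful of gross outliers
    location, covariance, det, support, dist = c_step(
        X, n_support=60, random_state=rng)
    return location, covariance  # robust location and scatter estimates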
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
    n_trials : int, n_trials > 0, or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
    n_iter : int, n_iter > 0
        Maximum number of iterations for the c_step procedure
        (2 is usually enough to get close to the final solution;
        in practice it rarely exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
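# Hedged illustration (added; not part of the original module): run a handful
# of random-start C-step trials with select_candidates and keep the two
# candidates with the lowest covariance determinant, which is exactly how
# fast_mcd uses this function on its data subsets.
def _select_candidates_demo(random_state=0):
    rng = np.random.RandomState(random_state)
    X = rng.randn(150, 2)
    best_locs, best_covs, best_supports, _ = select_candidates(
        X, n_support=90, n_trials=5, select=2, n_iter=5, random_state=rng)
    # shapes: (2, 2), (2, 2, 2) and (2, 150)
    return best_locs, best_covs, best_supports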
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        `[n_samples + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
    The FastMCD algorithm was introduced by Rousseeuw and Van Driessen
    in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
    1999, American Statistical Association and the American Society
    for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets of the
    data, then to pool them into larger subsets, and finally into the full
    data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
    support : array-like, type boolean, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.
    dist : array-like, shape (n_samples,)
        The Mahalanobis distances of the observations, computed with the
        raw robust location and covariance estimates.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
            # The above is too big. Let's try with something much smaller
# (and less optimal)
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
n_best_tot = 10
n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
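# Hedged usage sketch (added; not part of the original module): raw FastMCD
# estimates on data contaminated with a few gross outliers.  The raw robust
# location stays close to the inlier centre, unlike the plain sample mean.
def _fast_mcd_demo(random_state=0):
    rng = np.random.RandomState(random_state)
    X = rng.randn(200, 3)
    X[:20] += 10.  # contaminate 10% of the samples
    location, covariance, support, dist = fast_mcd(X, random_state=rng)
    return location, support.sum()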
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful when working with data whose mean is expected to be close to
        zero but is not exactly zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
        y : not used, present for API consistency.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
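# Hedged usage sketch (added; not part of the original module): fit MinCovDet
# on contaminated data and compare the reweighted robust covariance with the
# plain empirical covariance, which is pulled away by the outliers.
def _min_cov_det_demo(random_state=0):
    rng = np.random.RandomState(random_state)
    X = rng.multivariate_normal(mean=[0., 0.], cov=[[1., .6], [.6, 1.]],
                                size=300)
    X[:30] += 8.  # 10% gross outliers
    mcd = MinCovDet(random_state=rng).fit(X)
    emp = EmpiricalCovariance().fit(X)
    return mcd.covariance_, emp.covariance_, mcd.support_.sum()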
|
bsd-3-clause
|
frank-tancf/scikit-learn
|
sklearn/__check_build/__init__.py
|
345
|
1671
|
""" Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
|
bsd-3-clause
|
JerryLead/spark
|
python/pyspark/sql/context.py
|
2
|
23646
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import IntegerType, Row, StringType
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext", "UDFRegistration"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
    A SQLContext can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
:param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession.builder.getOrCreate()
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
@since(1.6)
def newSession(self):
"""
        Returns a new SQLContext as a new session, which has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
@ignore_unicode_prefix
@since(1.3)
def getConf(self, key, defaultValue=None):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is not None, return
defaultValue. If the key is not set and defaultValue is None, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return UDFRegistration(self)
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
@ignore_unicode_prefix
@since(1.2)
def registerFunction(self, name, f, returnType=StringType()):
"""Registers a python function (including lambda function) as a UDF
so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
        When the return type is not given it defaults to a string and conversion will automatically
be done. For any other return type, the produced object must match the specified type.
:param name: name of the UDF
:param f: python function
:param returnType: a :class:`pyspark.sql.types.DataType` object
>>> sqlContext.registerFunction("stringLengthString", lambda x: len(x))
>>> sqlContext.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)=u'4')]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
"""
self.sparkSession.catalog.registerFunction(name, f, returnType)
@ignore_unicode_prefix
@since(2.1)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""Register a java UDF so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
        When the return type is not specified, it is inferred via reflection.
:param name: name of the UDF
:param javaClassName: fully qualified name of java class
:param returnType: a :class:`pyspark.sql.types.DataType` object
>>> sqlContext.registerJavaFunction("javaStringLength",
... "test.org.apache.spark.sql.JavaStringLength", IntegerType())
>>> sqlContext.sql("SELECT javaStringLength('test')").collect()
[Row(UDF(test)=4)]
>>> sqlContext.registerJavaFunction("javaStringLength2",
... "test.org.apache.spark.sql.JavaStringLength")
>>> sqlContext.sql("SELECT javaStringLength2('test')").collect()
[Row(UDF(test)=4)]
"""
jdt = None
if returnType is not None:
jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json())
self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
datatype string after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
@since(1.6)
def dropTempTable(self, tableName):
""" Remove the temp table from catalog.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
@since(1.3)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return self.sparkSession.sql(sqlQuery)
@since(1.0)
def table(self, tableName):
"""Returns the specified table or view as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
        The returned DataFrame contains the columns ``database``, ``tableName`` and ``isTemporary``
        (a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(database=u'', tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Experimental.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
:param sparkContext: The SparkContext to wrap.
:param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
.. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate().
"""
def __init__(self, sparkContext, jhiveContext=None):
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
DeprecationWarning)
if jhiveContext is None:
sparkSession = SparkSession.builder.enableHiveSupport().getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
        you may end up launching multiple derby instances and encountering incredibly
        confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
class UDFRegistration(object):
"""Wrapper for user-defined function registration."""
def __init__(self, sqlContext):
self.sqlContext = sqlContext
def register(self, name, f, returnType=StringType()):
return self.sqlContext.registerFunction(name, f, returnType)
register.__doc__ = SQLContext.registerFunction.__doc__
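# Hedged usage sketch (added; not part of the original module): a minimal
# end-to-end flow with the deprecated SQLContext API, mirroring the doctests
# above.  Assumes an already-created SparkContext ``sc``.
def _sqlcontext_demo(sc):
    sqlContext = SQLContext(sc)
    df = sqlContext.createDataFrame([('Alice', 1)], ['name', 'age'])
    sqlContext.registerDataFrameAsTable(df, "people")
    return sqlContext.sql("SELECT name FROM people WHERE age > 0").collect()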
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
'"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", '
'"field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
tgquintela/pythonUtils
|
pythonUtils/ExploreDA/Plotting/contdistrib_plot.py
|
1
|
2207
|
"""
Continuous distribution plot
----------------------------
Distributions of continuous variables can be plotted and inspected with the
functions of this module.
"""
import matplotlib.pyplot as plt
import numpy as np
def cont_distrib_plot(x, n_bins, logscale=False):
"""Function to explore the distribution of a continiuos variable.
Parameters
----------
    x: pd.Series
        the data variable whose distribution we want to inspect.
n_bins: int
the number of bins we want to use to plot the distribution.
logscale: boolean (default=False)
if we want to use logscale for both variables.
Returns
-------
    fig: matplotlib.figure.Figure
        the figure showing the distribution of the variable data given in
        `x`.
TODO
----
Kernel density estimation
"""
## 0. Preparing inputs
# Filtering nan
x = x.dropna()
# Median
median = x.quantile(0.5)
x = np.array(x)
### A. Plotting
fig = plt.figure()
## 1. Plot histogram
ax0 = plt.subplot2grid((5, 1), (0, 0), rowspan=4)
ax0.hist(x, n_bins)
# Improving axes
ax0.set_xlim([x.min(), x.max()])
ax0.set_ylabel('Counts')
if logscale:
ax0.set_yscale('log')
ax0.set_xscale('log')
# Mark of median
l1 = plt.axvline(median, linewidth=2, color='r', label='Median',
linestyle='--')
# Mark of mean
l2 = plt.axvline(x.mean(), linewidth=2, color='k', label='Mean',
linestyle='--')
ax0.legend([l1, l2], ['Median', 'Mean'])
ax0.grid(True)
## 2. Plot box_plot
ax1 = plt.subplot2grid((5, 1), (4, 0), sharex=ax0)
ax1.boxplot(x, 0, 'rs', 0, 0.75)
# Making up the plot
mini = x.min()
maxi = x.max()
delta = (maxi-mini)/25.
ax1.set_xlim([mini-delta, maxi+delta])
if logscale:
ax1.set_yscale('log')
ax1.set_xscale('log')
ax1.grid(True)
ax1.set_yticklabels('A')
plt.setp(ax0.get_xticklabels(), visible=False)
## 3. Main settings
    fig.suptitle('Distribution exploration of continuous variable',
fontsize=14, fontweight='bold')
plt.xlabel('Value')
return fig
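# Hedged usage sketch (added; not part of the original module): the function
# above operates element-wise on a single variable, so a pandas Series is the
# natural input.  The output file name below is an arbitrary example.
if __name__ == "__main__":
    import pandas as pd
    rng = np.random.RandomState(0)
    sample = pd.Series(rng.lognormal(mean=0.0, sigma=1.0, size=1000))
    figure = cont_distrib_plot(sample, n_bins=30, logscale=False)
    figure.savefig("cont_distrib_example.png")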
|
mit
|
marcocaccin/scikit-learn
|
examples/linear_model/plot_ols_ridge_variance.py
|
387
|
2060
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Because there are only a few points in each dimension and linear
regression fits a straight line to follow these points as closely
as it can, noise in the observations causes a large variance in the
fit, as shown in the first plot: the slope of the fitted line can
vary considerably from one noisy draw of the observations to the next.
Ridge regression minimizes a penalised version of the least-squares
loss function. The penalty `shrinks` the value of the regression
coefficients.
Despite the few data points in each dimension, the slope of the
prediction is much more stable and the variance in the line itself is
greatly reduced, compared to that of standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
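# Added note (for reference): ordinary least squares minimizes only the data
# fit ||X w - y||^2_2, whereas ridge regression minimizes the penalised
# objective ||X w - y||^2_2 + alpha * ||w||^2_2.  The alpha * ||w||^2_2 term
# (here alpha=0.1) shrinks the coefficients, which is what stabilises the
# slope across the noisy resamples plotted below.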
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
|
bsd-3-clause
|
bigdataelephants/scikit-learn
|
sklearn/metrics/tests/test_regression.py
|
31
|
3010
|
from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred)
assert_almost_equal(error, 1 - 5. / 2)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2 = _check_reg_targets(y1, y2)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2)
|
bsd-3-clause
|
mayavanand/RMMAFinalProject
|
azimuth/models/DNN.py
|
3
|
3068
|
import copy
import numpy as np
import scipy as sp
import sklearn
def DNN_on_fold(feature_sets, train, test, y, y_all, X, dim, dimsum, learn_options):
import theanets
from sklearn.metrics import accuracy_score
y = np.array(y_all[learn_options['DNN target variable']].values, dtype=float)
y_train, X_train = y[train][:, None], X[train]
y_test, X_test = y[test][:, None], X[test]
num_hidden_layers = [1]#, 2, 3]
num_units = [2]#, 5, 8, 10, 15, 20, 25, 30, 40, 50, 60]
accuracies = np.zeros((len(num_hidden_layers), len(num_units)))
best_score = None
best_model = None
for i, hl in enumerate(num_hidden_layers):
for j, nu in enumerate(num_units):
architecture = np.zeros((2+hl,))
architecture[0] = X_train.shape[1]
architecture[-1] = 1#len(np.unique(y_train))
architecture[1:-1] = [nu for l in range(hl)]
if learn_options["cv"] == "stratified":
label_encoder = sklearn.preprocessing.LabelEncoder()
label_encoder.fit(y_all['Target gene'].values[train])
gene_classes = label_encoder.transform(y_all['Target gene'].values[train])
n_folds = len(np.unique(gene_classes))
cv = sklearn.cross_validation.StratifiedKFold(gene_classes, n_folds=n_folds, shuffle=True)
elif learn_options["cv"]=="gene":
gene_list = np.unique(y_all['Target gene'].values[train])
cv = []
for gene in gene_list:
cv.append(get_train_test(gene, y_all[train]))
n_folds = len(cv)
for train_ind, valid_ind in cv:
# e = theanets.Experiment(
# theanets.Classifier,
# layers=architecture,
# train_batches=32,
# # patience=100,
# # tied_weights=False,
# )
e = theanets.Experiment(
theanets.Regressor,
layers=architecture,
train_batches=32,
# patience=100,
# tied_weights=False,
)
e.run((X_train[train_ind], y_train[train_ind]), (X_train[valid_ind], y_train[valid_ind]))
pred = e.network.predict(X_train[valid_ind])
accuracies[i, j] += sp.stats.spearmanr(pred.flatten(), y_train[valid_ind].flatten())[0]
accuracies[i, j] = accuracies[i, j]/float(n_folds)
if best_score is None or accuracies[i, j] > best_score:
best_score = accuracies[i, j]
best_model = copy.deepcopy(e)
print "DNN with %d hidden layers and %d units, accuracy: %.4f *" % (hl, nu, accuracies[i,j])
else:
print "DNN with %d hidden layers and %d units, accuracy: %.4f" % (hl, nu, accuracies[i,j])
best_model.run((X_train, y_train), (X_test, y_test))
y_pred = best_model.network.predict(X[test])
return y_pred, None
|
bsd-3-clause
|
fzalkow/scikit-learn
|
examples/cluster/plot_color_quantization.py
|
297
|
3443
|
# -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0-1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
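# Added note (for illustration): the nested loops above are equivalent to the
# vectorized expression ``codebook[labels].reshape(w, h, -1)``, which may be
# preferable for large images.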
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
|
bsd-3-clause
|
derekjanni/spyre
|
spyre/server.py
|
3
|
13564
|
import matplotlib
matplotlib.use('Agg')
import os, os.path
import json
import jinja2
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import copy
try:
import StringIO as io # python2
except:
import io # python3
try:
from . import model
except:
import model
try:
from . import View
except:
try:
import View
except:
from . import view as View
import cherrypy
from cherrypy.lib.static import serve_file
from cherrypy.lib.static import serve_fileobj
# Settings
include_df_index = False
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
templateLoader = jinja2.FileSystemLoader( searchpath=ROOT_DIR )
templateEnv = jinja2.Environment( loader=templateLoader )
class Root(object):
def __init__(self,templateVars=None,
title="",
inputs=[],
outputs=[],
controls=[],
tabs=None,
getJsonDataFunction=None,
getDataFunction=None,
getTableFunction=None,
getPlotFunction=None,
getImageFunction=None,
getD3Function=None,
getCustomCSSFunction=None,
getCustomJSFunction=None,
getCustomHeadFunction=None,
getHTMLFunction=None,
getDownloadFunction=None,
noOutputFunction=None):
# populate template dictionary for creating input,controler, and output HTML and javascript
if templateVars is not None:
self.templateVars = templateVars
else:
self.templateVars = {}
self.templateVars['title'] = title
# necessary to ensure that spyre apps prior to version 0.2.0 still work
for input in inputs:
if 'input_type' in input:
input['type'] = input['input_type']
if 'variable_name' in input:
input['key'] = input['variable_name']
if 'linked_variable_name' in input:
input['linked_key'] = input['linked_variable_name']
if 'linked_variable_type' in input:
input['linked_type'] = input['linked_variable_type']
self.templateVars['inputs'] = inputs
for control in controls:
if 'control_type' in control:
control['type'] = control['control_type']
if 'control_id' in control:
control['id'] = control['control_id']
self.templateVars['controls'] = controls
for output in outputs:
if 'output_type' in output:
output['type'] = output['output_type']
if 'output_id' in output:
output['id'] = output['output_id']
self.templateVars['outputs'] = outputs
if tabs is not None:
self.templateVars['tabs'] = tabs
self.defaultTemplateVars = self.templateVars
self.getJsonData = getJsonDataFunction
self.getData = getDataFunction
self.getTable = getTableFunction
self.getPlot = getPlotFunction
self.getImage = getImageFunction
self.getD3 = getD3Function
self.getCustomJS = getCustomJSFunction
self.getCustomCSS = getCustomCSSFunction
self.getCustomHead = getCustomHeadFunction
self.getHTML = getHTMLFunction
self.noOutput = noOutputFunction
self.getDownload = getDownloadFunction
d3 = self.getD3()
custom_js = self.getCustomJS()
custom_css = self.getCustomCSS()
custom_head = self.getCustomHead()
self.templateVars['d3js'] = d3['js']
self.templateVars['d3css'] = d3['css']
self.templateVars['custom_js'] = custom_js
self.templateVars['custom_css'] = custom_css
self.templateVars['custom_head'] = custom_head
v = View.View()
self.templateVars['document_ready_js'] = ""
self.templateVars['js'] = v.getJS()
self.templateVars['css'] = v.getCSS()
@cherrypy.expose
def index(self, **args):
self.templateVars = copy.deepcopy(self.defaultTemplateVars) # create a deepcopy so other people's changes aren't cached
clean_args = self.clean_args(args)
self.use_custom_input_values(clean_args)
v = View.View()
template = jinja2.Template(v.getHTML())
return template.render( self.templateVars )
def use_custom_input_values(self, args):
input_registration = {}
index = 0
for input in self.templateVars['inputs']:
input_key = input['key']
            # register inputs so we can look them up by their variable name later
if 'action_id' in input:
input_registration[input_key] = {"type":input['type'], "action_id":input['action_id']}
else:
input_registration[input_key] = {"type":input['type'], "action_id":None}
if input_key in args.keys():
# use value from request
input_value = args[input_key]
elif 'value' in input:
# use value from template
input_value = input['value']
else:
# no value specified
continue
            # use the params passed in with the url to switch out the default input values
if input['type'] in ['text','slider']:
self.templateVars['inputs'][index]['value'] = input_value
if input['type'] in ['radiobuttons', 'dropdown']:
for option in input['options']:
option['checked'] = (option['value'] == input_value)
if input['type'] == 'checkboxgroup':
index2 = 0
for option in input['options']:
if option['value'] in input_value:
self.templateVars['inputs'][index]['options'][index2]['checked'] = True
else:
self.templateVars['inputs'][index]['options'][index2]['checked'] = False
index2+=1
index+=1
@cherrypy.expose
def plot(self, **args):
args = self.clean_args(args)
p = self.getPlot(args)
d = model.Plot()
buffer = d.getPlotPath(p)
cherrypy.response.headers['Content-Type'] = 'image/png'
return buffer.getvalue()
@cherrypy.expose
def image(self, **args):
args = self.clean_args(args)
img = self.getImage(args)
d = model.Image()
buffer = d.getImagePath(img)
cherrypy.response.headers['Content-Type'] = 'image/jpg'
return buffer.getvalue()
@cherrypy.expose
def data(self, **args):
args = self.clean_args(args)
data = self.getJsonData(args)
cherrypy.response.headers['Content-Type'] = 'application/json'
return json.dumps({'data':data,'args':args}).encode('utf8')
@cherrypy.expose
def table(self, **args):
args = self.clean_args(args)
df = self.getTable(args)
html = df.to_html(index=include_df_index, escape=False)
i = 0
for col in df.columns:
            html = html.replace('<th>{}'.format(col), '<th><a onclick="sortTable({},\'table0\');"><b>{}</b></a>'.format(i, col))
i += 1
html = html.replace('border="1" class="dataframe"','class="sortable" id="sortable"')
html = html.replace('style="text-align: right;"','')
cherrypy.response.headers['Content-Type'] = 'text/html'
return html
@cherrypy.expose
def html(self, **args):
args = self.clean_args(args)
html = self.getHTML(args)
cherrypy.response.headers['Content-Type'] = 'text/html'
return html
@cherrypy.expose
def download(self, **args):
args = self.clean_args(args)
filepath = self.getDownload(args)
        if isinstance(filepath, str):
            return serve_file(filepath, "application/x-download", "attachment", name='data.csv')
        elif hasattr(filepath, 'getvalue'):
            return serve_fileobj(filepath.getvalue(), "application/x-download", "attachment", name='data.csv')
        else:
            return "error downloading file. filepath must be a string or a buffer"
@cherrypy.expose
def no_output(self, **args):
args = self.clean_args(args)
self.noOutput(args)
return ''
@cherrypy.expose
def spinning_wheel(self, **args):
v = View.View()
buffer = v.getSpinningWheel()
cherrypy.response.headers['Content-Type'] = 'image/gif'
return buffer.getvalue()
def clean_args(self,args):
for k,v in args.items():
# turn checkbox group string into a list
if v.rfind("__list__") == 0:
tmp = v.split(',')
if len(tmp)>1:
args[k] = tmp[1:]
else:
args[k] = []
# convert to a number
if v.rfind("__float__") == 0:
args[k] = float(v[9:])
return args
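# A quick sketch of what clean_args (called on a Root instance) produces for
# the two encodings it handles; the input dicts below are hypothetical
# request parameters:
#
#   clean_args({'freq': '__float__12.5'})      -> {'freq': 12.5}
#   clean_args({'tags': '__list__,red,blue'})  -> {'tags': ['red', 'blue']}
#   clean_args({'tags': '__list__'})           -> {'tags': []}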
class App(object):
title = ""
    # Will be used when there is more than one app in a site
app_bar_html = None
outputs = []
inputs = []
controls = []
tabs = None
templateVars = None
def getJsonData(self, params):
"""turns the DataFrame returned by getData into a dictionary
arguments:
            the params passed in for table or d3 outputs are forwarded on to getData
"""
df = self.getData(params)
return df.to_dict(orient='records')
def getData(self, params):
"""Override this function
arguments:
params (dict)
returns:
DataFrame
"""
try:
return eval("self."+str(params['output_id'])+"(params)")
except AttributeError:
return pd.DataFrame({'name':['Override','getData() method','to generate tables'], 'count':[1,4,3]})
def getTable(self, params):
"""Used to create html table. Uses dataframe returned by getData by default
override to return a different dataframe.
arguments: params (dict)
returns: html table
"""
return self.getData(params)
def getDownload(self, params):
"""Override this function
arguments: params (dict)
returns: path to file or buffer to be downloaded (string or buffer)
"""
df = self.getData(params)
buffer = io.StringIO()
df.to_csv(buffer, index=False, encoding='utf-8')
filepath = buffer
return filepath
def getPlot(self, params):
"""Override this function
arguments:
params (dict)
returns:
matplotlib.pyplot figure
"""
try:
return eval("self."+str(params['output_id'])+"(params)")
except AttributeError:
try:
return self.getData(params).plot()
except:
fig = plt.figure() # make figure object
splt = fig.add_subplot(1,1,1)
splt.set_title("Override getPlot() method to generate figures")
return fig
def getImage(self, params):
"""Override this function
arguments: params (dict)
returns: matplotlib.image (figure)
"""
try:
return eval("self."+str(params['output_id'])+"(params)")
except AttributeError:
return np.array([[0,0,0]])
def getHTML(self, params):
"""Override this function
arguments: params (dict)
returns: html (string)
"""
try:
return eval("self."+str(params['output_id'])+"(params)")
except AttributeError:
return "<b>Override</b> the getHTML method to insert your own HTML <i>here</i>"
def noOutput(self, params):
"""Override this function
        A method for doing stuff that doesn't require an output (refreshing data,
updating variables, etc.)
arguments:
params (dict)
"""
try:
return eval("self."+str(params['output_id'])+"(params)")
except AttributeError:
pass
def getD3(self):
d3 = {}
d3['css'] = ""
d3['js'] = ""
return d3
def getCustomJS(self):
"""Override this function
returns:
string of javascript to insert on page load
"""
return ""
def getCustomCSS(self):
"""Override this function
returns:
string of css to insert on page load
"""
return ""
def getCustomHead(self):
"""Override this function
returns:
html to put in html header
"""
return ""
def launch(self,host="local",port=8080):
webapp = self.getRoot()
if host!="local":
cherrypy.server.socket_host = '0.0.0.0'
cherrypy.server.socket_port = port
cherrypy.quickstart(webapp)
def launch_in_notebook(self, port=9095, width=900, height=600):
"""launch the app within an iframe in ipython notebook"""
from IPython.lib import backgroundjobs as bg
from IPython.display import HTML
jobs = bg.BackgroundJobManager()
jobs.new(self.launch, kw=dict(port=port))
return HTML('<iframe src=http://localhost:{} width={} height={}></iframe>'.format(port,width,height))
def getRoot(self):
webapp = Root(templateVars=self.templateVars,
title=self.title,
inputs=self.inputs,
outputs=self.outputs,
controls=self.controls,
tabs=self.tabs,
getJsonDataFunction=self.getJsonData,
getDataFunction=self.getData,
getTableFunction=self.getTable,
getPlotFunction=self.getPlot,
getImageFunction=self.getImage,
getD3Function=self.getD3,
getCustomJSFunction=self.getCustomJS,
getCustomCSSFunction=self.getCustomCSS,
getCustomHeadFunction=self.getCustomHead,
getHTMLFunction=self.getHTML,
getDownloadFunction=self.getDownload,
noOutputFunction=self.noOutput)
return webapp
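# A minimal sketch of wiring up an App subclass (the class name, the 'freq'
# input key, and the 'mytable' output id are hypothetical):
#
#   class SimpleApp(App):
#       title = "Simple Example"
#       inputs = [{'type': 'text', 'key': 'freq', 'value': '5', 'action_id': 'mytable'}]
#       outputs = [{'type': 'table', 'id': 'mytable'}]
#
#       def getData(self, params):
#           freq = float(params['freq'])
#           return pd.DataFrame({'x': list(range(10)),
#                                'y': [freq * i for i in range(10)]})
#
#   SimpleApp().launch(port=8080)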
class Site(object):
"""Creates a 'tree' of cherrypy 'Root' objects that allow for the
creation of multiple apps with routes to different 'apps.'
    Calling the launch method will serve the top node of the cherrypy Root
    object tree.
"""
def __init__(self, appobj):
self.site_app_bar = list()
self.addIndex(appobj)
def addIndex(self, appobj):
self.site_app_bar.append(("/",
appobj.app_bar_html or appobj.title or "/"))
self.root = appobj().getRoot()
def get_route(self, fullRoute):
routeSplit = fullRoute.split('/')
routeSplit.remove('')
parent = self.root
for route in routeSplit[:-1]:
parent = getattr(parent, route)
return parent, routeSplit[-1]
def addApp(self, appobj, fullRoute):
parent, route = self.get_route(fullRoute)
self.site_app_bar.append((fullRoute,
appobj.app_bar_html or appobj.title or route))
setattr(parent, route, appobj().getRoot())
def getRoot(self):
"""A convenience method to make the site API similar to the app API,
in terms of how the cherrypy Root object is retrieved"""
return self.root
def launch(self, host="local", port=8080):
"""Calling the Launch method on a Site object will serve the top
node of the cherrypy Root object tree"""
#Need to add in the appbar if many apps
self.root.templateVars['app_bar'] = self.site_app_bar
for fullRoute, _ in self.site_app_bar[1:]:
parent, route = self.get_route(fullRoute)
parent.__dict__[route].templateVars['app_bar'] = self.site_app_bar
if host != "local":
cherrypy.server.socket_host = '0.0.0.0'
cherrypy.server.socket_port = port
cherrypy.quickstart(self.root)
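# A rough sketch of composing several apps into one Site (AppOne, AppTwo and
# the '/second' route below are hypothetical placeholders):
#
#   site = Site(AppOne)             # AppOne is served at "/"
#   site.addApp(AppTwo, '/second')  # AppTwo is served at "/second"
#   site.launch(port=8080)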
class Launch(App):
"""Warning: This class is depricated. Use App instead"""
if __name__=='__main__':
app = App()
app.launch()
|
mit
|
wathen/PhD
|
MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/ParamTests/MHDmatrixSetup.py
|
1
|
4800
|
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
from dolfin import *
# from MatrixOperations import *
import numpy as np
#import matplotlib.pylab as plt
from scipy.sparse import coo_matrix, csr_matrix, spdiags, bmat
import os, inspect
from HiptmairSetup import BoundaryEdge
import matplotlib
from matplotlib.pylab import plt
import CheckPetsc4py as CP
import MatrixOperations as MO
import time
import PETScIO as IO
import MHDmulti
def BoundaryIndices(mesh):
dim = mesh.geometry().dim()
if dim == 3:
EdgeBoundary = BoundaryEdge(mesh)
        EdgeBoundary = np.sort(EdgeBoundary)[::2]
else:
B = BoundaryMesh(mesh,"exterior",False)
EdgeBoundary = B.entity_map(1).array()
MagneticBoundary = np.ones(mesh.num_edges())
MagneticBoundary[EdgeBoundary] = 0
Magnetic = spdiags(MagneticBoundary,0,mesh.num_edges(),mesh.num_edges())
B = BoundaryMesh(mesh,"exterior",False)
NodalBoundary = B.entity_map(0).array()#.astype("int","C")
LagrangeBoundary = np.ones(mesh.num_vertices())
LagrangeBoundary[NodalBoundary] = 0
Lagrange = spdiags(LagrangeBoundary,0,mesh.num_vertices(),mesh.num_vertices())
if dim == 3:
        VelocityBoundary = np.concatenate((LagrangeBoundary,LagrangeBoundary,LagrangeBoundary),axis=0)
else:
        VelocityBoundary = np.concatenate((LagrangeBoundary,LagrangeBoundary),axis=0)
Velocity = spdiags(VelocityBoundary,0,dim*mesh.num_vertices(),dim*mesh.num_vertices())
return [Velocity, Magnetic, Lagrange]
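# A small self-contained sketch of the masking idea used above: a 0/1 diagonal
# matrix built with spdiags zeroes out the entries that correspond to boundary
# entities when it multiplies a vector (the indices here are made up):
#
#   import numpy as np
#   from scipy.sparse import spdiags
#   mask = np.ones(5)
#   mask[[0, 4]] = 0                       # pretend dofs 0 and 4 are on the boundary
#   M = spdiags(mask, 0, 5, 5)
#   print(M.dot(np.arange(1.0, 6.0)))      # -> [0. 2. 3. 4. 0.]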
def Assemble(W, NS, Maxwell, Couple, L_ns, L_m, RHSform, BC, Type, IterType):
tic()
if Type == 'NonLinear':
F = assemble(NS[0])
BC[0].apply(F)
F = F.sparray()
if IterType == 'Full':
C = assemble(Couple[0])
C = BC[4]*C.sparray()*BC[3]
else:
C = None
bu = assemble(L_ns-RHSform[0])
bp = assemble(-RHSform[1])
bb = assemble(L_m-RHSform[2])
br = assemble(-RHSform[3])
BC[0].apply(bu)
BC[1].apply(bb)
BC[2].apply(br)
b = np.concatenate((bu.array(),bp.array(),bb.array(),br.array()),axis = 0)
MO.StrTimePrint("MHD non-linear matrix assembled, time: ",toc())
return [F, C],b
elif Type == 'Linear':
M = assemble(Maxwell[0])
D = assemble(Maxwell[2])
SS = assemble(Maxwell[3])
B = assemble(NS[2])
S = assemble(NS[3])
SS = 0*SS
BC[1].apply(M)
BC[2].apply(SS)
B = B.sparray()*BC[3]
S = S.sparray()
M = M.sparray()
D = BC[4]*D.sparray()*BC[5]
SS = SS.sparray()
MO.StrTimePrint("MHD linear matrix assembled, time: ",toc())
return [B,M,D,S,SS]
else:
bu = assemble(L_ns-RHSform[0])
bp = assemble(-RHSform[1])
bb = assemble(L_m-RHSform[2])
br = assemble(-RHSform[3])
BC[0].apply(bu)
BC[1].apply(bb)
BC[2].apply(br)
b = np.concatenate((bu.array(),bp.array(),bb.array(),br.array()),axis = 0)
return IO.arrayToVec(b)
def SystemAssemble(W,A,b,SetupType,IterType):
tic()
if SetupType == 'Matrix':
if IterType == 'Full':
A = CP.Scipy2PETSc(bmat([[A[0],A[2].T,-A[1].T,None],
[A[2],A[5],None,None],
[A[1],None,A[3],A[4]],
[None,None,A[4].T,A[6]]]))
else:
A = CP.Scipy2PETSc(bmat([[A[0],A[2].T,None,None],
[A[2],A[5],None,None],[None,None,A[3],A[4]],
[None,None,A[4].T,A[6]]]))
b = IO.arrayToVec(b)
MO.StrTimePrint("MHD system assemble, time: ",toc())
return A,b
else:
for i in range(len(A)):
            if A[i] is not None:
A[i] = CP.Scipy2PETSc(A[i])
if IterType == 'Full':
P = PETSc.Mat().createPython([W[0].dim()+W[1].dim()+W[2].dim()+W[3].dim(),W[0].dim()+W[1].dim()+W[2].dim()+W[3].dim()])
P.setType('python')
p = MHDmulti.MHDmat(W,A)
P.setPythonContext(p)
else:
MatFluid = PETSc.Mat().createPython([W[0].dim()+W[1].dim(), W[0].dim()+W[1].dim()])
MatFluid.setType('python')
pFluid = MHDmulti.MatFluid([W[0],W[1]],A)
MatFluid.setPythonContext(pFluid)
MatMag = PETSc.Mat().createPython([W[2].dim()+W[3].dim(), W[2].dim()+W[3].dim()])
MatMag.setType('python')
pMag = MHDmulti.MatMag([W[2],W[3]],A)
MatMag.setPythonContext(pMag)
P = [MatFluid,MatMag]
b = IO.arrayToVec(b)
MO.StrTimePrint("MHD mult-class setup, time: ",toc())
return P,b
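# A tiny sketch of the scipy.sparse.bmat block layout used in SystemAssemble,
# with 2x2 identity blocks standing in for the real operators (None marks an
# empty block):
#
#   from scipy.sparse import bmat, identity
#   I = identity(2)
#   K = bmat([[I, None],
#             [None, I]])   # block-diagonal 4x4 system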
|
mit
|
aitatanit/metatlas
|
setup.py
|
1
|
2609
|
"""Setup script for metatlas package.
"""
DISTNAME = 'metatlas'
DESCRIPTION = 'Metabolite Atlas'
LONG_DESCRIPTION = open('README.rst', 'rb').read().decode('utf-8')
MAINTAINER = 'Steven Silvester'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://github.com/metabolite-atlas/metatlas'
LICENSE = 'MIT'
REQUIRES = ["numpy", "pytables", "pymzml", "simplejson", "rpy2", "pandas",
"dataset", "ipython", "traitlets", "six", "tabulate", "dill",
"gspread"]
CLASSIFIERS = """\
Development Status :: 2 - Pre-Alpha
Intended Audience :: Developers
Intended Audience :: Science/Research
License :: OSI Approved :: BSD License
Operating System :: OS Independent
Programming Language :: Python
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3.4
Topic :: Scientific/Engineering
Topic :: Software Development
"""
import imp
import shutil
from setuptools import setup, find_packages
from setuptools.command.install import install
class custom_install(install):
def run(self):
install.run(self)
# patch pymzml to use new obo file
dirname = imp.find_module('pymzml')[1]
shutil.copy('psi-ms-1.2.0.obo', '%s/obo' % dirname)
with open('%s/obo.py' % dirname, 'r') as fid:
lines = fid.readlines()
with open('%s/obo.py' % dirname, 'w') as fid:
for line in lines:
if "version='1.1.0'" in line:
line = line.replace('1.1.0', '1.2.0')
fid.write(line)
with open('metatlas/__init__.py') as fid:
for line in fid:
if line.startswith('__version__'):
version = line.strip().split()[-1][1:-1]
break
if __name__ == "__main__":
setup(
name=DISTNAME,
version=version,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
download_url=URL,
license=LICENSE,
platforms=["Any"],
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
classifiers=list(filter(None, CLASSIFIERS.split('\n'))),
packages=find_packages(exclude=['doc']),
include_package_data=True,
zip_safe=False, # the package can run out of an .egg file
install_requires=['pymzml', 'simplejson', 'requests_toolbelt',
'dataset', 'ipython', 'traitlets', 'six',
'tabulate', 'dill', 'oauth2client', 'gspread'],
requires=REQUIRES,
cmdclass={'install': custom_install},
)
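# Installing the package (for example with "pip install ." or
# "python setup.py install") runs the custom_install command registered above,
# which then patches the bundled pymzml obo file; the pymzml layout assumed
# here may differ across pymzml versions.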
|
bsd-3-clause
|
hhj0325/pystock
|
com/hhj/maoyan/countByWordCloud.py
|
1
|
1036
|
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import jieba
comments = []
with open('files/comments.txt', 'r', encoding='utf-8')as f:
rows = f.readlines()
try:
for row in rows:
comment = row.split(',')[2]
if comment != '':
comments.append(comment)
# print(city)
except Exception as e:
print(e)
comment_after_split = jieba.cut(str(comments), cut_all=False)
words = ' '.join(comment_after_split)
# Filter out useless stop words
stopwords = STOPWORDS.copy()
stopwords.add('电影')
stopwords.add('一部')
stopwords.add('一个')
stopwords.add('没有')
stopwords.add('什么')
stopwords.add('有点')
stopwords.add('感觉')
stopwords.add('毒液')
stopwords.add('就是')
stopwords.add('觉得')
wc = WordCloud(width=1024, height=768, background_color='white', font_path='STKAITI.TTF',
stopwords=stopwords, max_font_size=400, random_state=50)
wc.generate_from_text(words)
plt.imshow(wc)
plt.axis('off')
plt.show()
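# Optionally, the rendered cloud could also be written to disk instead of
# (or as well as) being shown interactively; the filename is arbitrary:
#
#   wc.to_file('wordcloud.png')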
|
apache-2.0
|
plissonf/scikit-learn
|
sklearn/neural_network/tests/test_rbm.py
|
225
|
6278
|
import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
|
bsd-3-clause
|
nvoron23/statsmodels
|
statsmodels/base/tests/test_data.py
|
5
|
35051
|
import numpy as np
from numpy.testing import assert_equal, assert_, assert_raises
import pandas
import pandas.util.testing as ptesting
from statsmodels.base import data as sm_data
from statsmodels.formula import handle_formula_data
#class TestDates(object):
# @classmethod
# def setupClass(cls):
# nrows = 10
# cls.dates_result = cls.dates_results = np.random.random(nrows)
#
# def test_dates(self):
# np.testing.assert_equal(data.wrap_output(self.dates_input, 'dates'),
# self.dates_result)
class TestArrays(object):
@classmethod
def setupClass(cls):
cls.endog = np.random.random(10)
cls.exog = np.c_[np.ones(10), np.random.random((10,2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_result = cls.col_input = np.random.random(nvars)
cls.row_result = cls.row_input = np.random.random(nrows)
cls.cov_result = cls.cov_input = np.random.random((nvars, nvars))
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y'
cls.row_labels = None
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog)
def test_attach(self):
data = self.data
        # this makes sure that what the wrappers need works, but not the wrapped
# results themselves
np.testing.assert_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
np.testing.assert_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
np.testing.assert_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
def test_names(self):
data = self.data
np.testing.assert_equal(data.xnames, self.xnames)
np.testing.assert_equal(data.ynames, self.ynames)
def test_labels(self):
#HACK: because numpy master after NA stuff assert_equal fails on
# pandas indices
np.testing.assert_(np.all(self.data.row_labels == self.row_labels))
class TestArrays2dEndog(TestArrays):
@classmethod
def setupClass(cls):
super(TestArrays2dEndog, cls).setupClass()
cls.endog = np.random.random((10,1))
cls.exog = np.c_[np.ones(10), np.random.random((10,2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
#cls.endog = endog.squeeze()
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
class TestArrays1dExog(TestArrays):
@classmethod
def setupClass(cls):
super(TestArrays1dExog, cls).setupClass()
cls.endog = np.random.random(10)
exog = np.random.random(10)
cls.data = sm_data.handle_data(cls.endog, exog)
cls.exog = exog[:,None]
cls.xnames = ['x1']
cls.ynames = 'y'
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog.squeeze())
class TestDataFrames(TestArrays):
@classmethod
def setupClass(cls):
cls.endog = pandas.DataFrame(np.random.random(10), columns=['y_1'])
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x_1','x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
ptesting.assert_frame_equal(self.data.orig_endog, self.endog)
ptesting.assert_frame_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_attach(self):
data = self.data
        # this makes sure that what the wrappers need works, but not the wrapped
# results themselves
ptesting.assert_series_equal(data.wrap_output(self.col_input,
'columns'),
self.col_result)
ptesting.assert_series_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
ptesting.assert_frame_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
class TestLists(TestArrays):
@classmethod
def setupClass(cls):
super(TestLists, cls).setupClass()
cls.endog = np.random.random(10).tolist()
cls.exog = np.c_[np.ones(10), np.random.random((10,2))].tolist()
cls.data = sm_data.handle_data(cls.endog, cls.exog)
class TestRecarrays(TestArrays):
@classmethod
def setupClass(cls):
super(TestRecarrays, cls).setupClass()
cls.endog = np.random.random(9).view([('y_1',
'f8')]).view(np.recarray)
exog = np.random.random(9*3).view([('const', 'f8'),('x_1', 'f8'),
('x_2', 'f8')]).view(np.recarray)
exog['const'] = 1
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.view(float))
np.testing.assert_equal(self.data.exog, self.exog.view((float,3)))
class TestStructarrays(TestArrays):
@classmethod
def setupClass(cls):
super(TestStructarrays, cls).setupClass()
cls.endog = np.random.random(9).view([('y_1',
'f8')]).view(np.recarray)
exog = np.random.random(9*3).view([('const', 'f8'),('x_1', 'f8'),
('x_2', 'f8')]).view(np.recarray)
exog['const'] = 1
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.view(float))
np.testing.assert_equal(self.data.exog, self.exog.view((float,3)))
class TestListDataFrame(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = np.random.random(10).tolist()
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x_1','x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y'
cls.row_labels = cls.exog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
ptesting.assert_frame_equal(self.data.orig_exog, self.exog)
class TestDataFrameList(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = pandas.DataFrame(np.random.random(10), columns=['y_1'])
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x1','x2'])
exog.insert(0, 'const', 1)
cls.exog = exog.values.tolist()
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y_1'
cls.row_labels = cls.endog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
def test_orig(self):
ptesting.assert_frame_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
class TestArrayDataFrame(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = np.random.random(10)
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x_1','x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y'
cls.row_labels = cls.exog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
ptesting.assert_frame_equal(self.data.orig_exog, self.exog)
class TestDataFrameArray(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = pandas.DataFrame(np.random.random(10), columns=['y_1'])
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x1','x2']) # names mimic defaults
exog.insert(0, 'const', 1)
cls.exog = exog.values
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y_1'
cls.row_labels = cls.endog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
def test_orig(self):
ptesting.assert_frame_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
class TestSeriesDataFrame(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = pandas.Series(np.random.random(10), name='y_1')
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x_1','x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
ptesting.assert_series_equal(self.data.orig_endog, self.endog)
ptesting.assert_frame_equal(self.data.orig_exog, self.exog)
class TestSeriesSeries(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = pandas.Series(np.random.random(10), name='y_1')
exog = pandas.Series(np.random.random(10), name='x_1')
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 1
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index = [exog.name])
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index = exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = [exog.name],
columns = [exog.name])
cls.xnames = ['x_1']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
ptesting.assert_series_equal(self.data.orig_endog, self.endog)
ptesting.assert_series_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog.values[:,None])
def test_alignment():
#Fix Issue #206
from statsmodels.regression.linear_model import OLS
from statsmodels.datasets.macrodata import load_pandas
d = load_pandas().data
#growth rates
gs_l_realinv = 400 * np.log(d['realinv']).diff().dropna()
gs_l_realgdp = 400 * np.log(d['realgdp']).diff().dropna()
lint = d['realint'][:-1] # incorrect indexing for test purposes
endog = gs_l_realinv
# re-index because they won't conform to lint
realgdp = gs_l_realgdp.reindex(lint.index, method='bfill')
data = dict(const=np.ones_like(lint), lrealgdp=realgdp, lint=lint)
exog = pandas.DataFrame(data)
# which index do we get??
np.testing.assert_raises(ValueError, OLS, *(endog, exog))
class TestMultipleEqsArrays(TestArrays):
@classmethod
def setupClass(cls):
cls.endog = np.random.random((10,4))
cls.exog = np.c_[np.ones(10), np.random.random((10,2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
neqs = 4
cls.col_result = cls.col_input = np.random.random(nvars)
cls.row_result = cls.row_input = np.random.random(nrows)
cls.cov_result = cls.cov_input = np.random.random((nvars, nvars))
cls.cov_eq_result = cls.cov_eq_input = np.random.random((neqs,neqs))
cls.col_eq_result = cls.col_eq_input = np.array((neqs, nvars))
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = ['y1', 'y2', 'y3', 'y4']
cls.row_labels = None
def test_attach(self):
data = self.data
        # this makes sure that what the wrappers need works, but not the wrapped
# results themselves
np.testing.assert_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
np.testing.assert_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
np.testing.assert_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
np.testing.assert_equal(data.wrap_output(self.cov_eq_input, 'cov_eq'),
self.cov_eq_result)
np.testing.assert_equal(data.wrap_output(self.col_eq_input,
'columns_eq'),
self.col_eq_result)
class TestMultipleEqsDataFrames(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = endog = pandas.DataFrame(np.random.random((10,4)),
columns=['y_1', 'y_2', 'y_3', 'y_4'])
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x_1','x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
neqs = 4
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.cov_eq_input = np.random.random((neqs, neqs))
cls.cov_eq_result = pandas.DataFrame(cls.cov_eq_input,
index=endog.columns,
columns=endog.columns)
cls.col_eq_input = np.random.random((nvars, neqs))
cls.col_eq_result = pandas.DataFrame(cls.col_eq_input,
index=exog.columns,
columns=endog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = ['y_1', 'y_2', 'y_3', 'y_4']
cls.row_labels = cls.exog.index
def test_attach(self):
data = self.data
ptesting.assert_series_equal(data.wrap_output(self.col_input,
'columns'),
self.col_result)
ptesting.assert_series_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
ptesting.assert_frame_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
ptesting.assert_frame_equal(data.wrap_output(self.cov_eq_input,
'cov_eq'),
self.cov_eq_result)
ptesting.assert_frame_equal(data.wrap_output(self.col_eq_input,
'columns_eq'),
self.col_eq_result)
class TestMissingArray(object):
@classmethod
def setupClass(cls):
X = np.random.random((25,4))
y = np.random.random(25)
y[10] = np.nan
X[2,3] = np.nan
X[14,2] = np.nan
cls.y, cls.X = y, X
def test_raise_no_missing(self):
# smoke test for #1700
sm_data.handle_data(np.random.random(20), np.random.random((20, 2)),
'raise')
def test_raise(self):
np.testing.assert_raises(Exception, sm_data.handle_data,
(self.y, self.X, 'raise'))
def test_drop(self):
y = self.y
X = self.X
combined = np.c_[y, X]
idx = ~np.isnan(combined).any(axis=1)
y = y[idx]
X = X[idx]
data = sm_data.handle_data(self.y, self.X, 'drop')
np.testing.assert_array_equal(data.endog, y)
np.testing.assert_array_equal(data.exog, X)
def test_none(self):
data = sm_data.handle_data(self.y, self.X, 'none', hasconst=False)
np.testing.assert_array_equal(data.endog, self.y)
np.testing.assert_array_equal(data.exog, self.X)
def test_endog_only_raise(self):
np.testing.assert_raises(Exception, sm_data.handle_data,
(self.y, None, 'raise'))
def test_endog_only_drop(self):
y = self.y
y = y[~np.isnan(y)]
data = sm_data.handle_data(self.y, None, 'drop')
np.testing.assert_array_equal(data.endog, y)
def test_mv_endog(self):
y = self.X
y = y[~np.isnan(y).any(axis=1)]
data = sm_data.handle_data(self.X, None, 'drop')
np.testing.assert_array_equal(data.endog, y)
def test_extra_kwargs_2d(self):
sigma = np.random.random((25, 25))
sigma = sigma + sigma.T - np.diag(np.diag(sigma))
data = sm_data.handle_data(self.y, self.X, 'drop', sigma=sigma)
idx = ~np.isnan(np.c_[self.y, self.X]).any(axis=1)
sigma = sigma[idx][:,idx]
np.testing.assert_array_equal(data.sigma, sigma)
def test_extra_kwargs_1d(self):
weights = np.random.random(25)
data = sm_data.handle_data(self.y, self.X, 'drop', weights=weights)
idx = ~np.isnan(np.c_[self.y, self.X]).any(axis=1)
weights = weights[idx]
np.testing.assert_array_equal(data.weights, weights)
class TestMissingPandas(object):
@classmethod
def setupClass(cls):
X = np.random.random((25,4))
y = np.random.random(25)
y[10] = np.nan
X[2,3] = np.nan
X[14,2] = np.nan
cls.y, cls.X = pandas.Series(y), pandas.DataFrame(X)
def test_raise_no_missing(self):
# smoke test for #1700
sm_data.handle_data(pandas.Series(np.random.random(20)),
pandas.DataFrame(np.random.random((20, 2))),
'raise')
def test_raise(self):
np.testing.assert_raises(Exception, sm_data.handle_data,
(self.y, self.X, 'raise'))
def test_drop(self):
y = self.y
X = self.X
combined = np.c_[y, X]
idx = ~np.isnan(combined).any(axis=1)
y = y.ix[idx]
X = X.ix[idx]
data = sm_data.handle_data(self.y, self.X, 'drop')
np.testing.assert_array_equal(data.endog, y.values)
ptesting.assert_series_equal(data.orig_endog, self.y.ix[idx])
np.testing.assert_array_equal(data.exog, X.values)
ptesting.assert_frame_equal(data.orig_exog, self.X.ix[idx])
def test_none(self):
data = sm_data.handle_data(self.y, self.X, 'none', hasconst=False)
np.testing.assert_array_equal(data.endog, self.y.values)
np.testing.assert_array_equal(data.exog, self.X.values)
def test_endog_only_raise(self):
np.testing.assert_raises(Exception, sm_data.handle_data,
(self.y, None, 'raise'))
def test_endog_only_drop(self):
y = self.y
y = y.dropna()
data = sm_data.handle_data(self.y, None, 'drop')
np.testing.assert_array_equal(data.endog, y.values)
def test_mv_endog(self):
y = self.X
y = y.ix[~np.isnan(y.values).any(axis=1)]
data = sm_data.handle_data(self.X, None, 'drop')
np.testing.assert_array_equal(data.endog, y.values)
def test_labels(self):
        # rows 2, 10, and 14 contain NaNs and are dropped
labels = pandas.Index([0, 1, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24])
data = sm_data.handle_data(self.y, self.X, 'drop')
np.testing.assert_(data.row_labels.equals(labels))
class TestConstant(object):
@classmethod
def setupClass(cls):
from statsmodels.datasets.longley import load_pandas
cls.data = load_pandas()
def test_array_constant(self):
exog = self.data.exog.copy()
exog['const'] = 1
data = sm_data.handle_data(self.data.endog.values, exog.values)
np.testing.assert_equal(data.k_constant, 1)
np.testing.assert_equal(data.const_idx, 6)
def test_pandas_constant(self):
exog = self.data.exog.copy()
exog['const'] = 1
data = sm_data.handle_data(self.data.endog, exog)
np.testing.assert_equal(data.k_constant, 1)
np.testing.assert_equal(data.const_idx, 6)
def test_pandas_noconstant(self):
exog = self.data.exog.copy()
data = sm_data.handle_data(self.data.endog, exog)
np.testing.assert_equal(data.k_constant, 0)
np.testing.assert_equal(data.const_idx, None)
def test_array_noconstant(self):
exog = self.data.exog.copy()
data = sm_data.handle_data(self.data.endog.values, exog.values)
np.testing.assert_equal(data.k_constant, 0)
np.testing.assert_equal(data.const_idx, None)
class TestHandleMissing(object):
def test_pandas(self):
df = ptesting.makeDataFrame()
df.values[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]], df[df.columns[1:]]
data, _ = sm_data.handle_missing(y, X, missing='drop')
df = df.dropna()
y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]]
ptesting.assert_frame_equal(data['exog'], X_exp)
ptesting.assert_series_equal(data['endog'], y_exp)
def test_arrays(self):
arr = np.random.randn(20, 4)
arr[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = arr[:,0], arr[:,1:]
data, _ = sm_data.handle_missing(y, X, missing='drop')
bools_mask = np.ones(20, dtype=bool)
bools_mask[[2, 5, 10]] = False
y_exp = arr[bools_mask, 0]
X_exp = arr[bools_mask, 1:]
np.testing.assert_array_equal(data['endog'], y_exp)
np.testing.assert_array_equal(data['exog'], X_exp)
def test_pandas_array(self):
df = ptesting.makeDataFrame()
df.values[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]], df[df.columns[1:]].values
data, _ = sm_data.handle_missing(y, X, missing='drop')
df = df.dropna()
y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]].values
np.testing.assert_array_equal(data['exog'], X_exp)
ptesting.assert_series_equal(data['endog'], y_exp)
def test_array_pandas(self):
df = ptesting.makeDataFrame()
df.values[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]].values, df[df.columns[1:]]
data, _ = sm_data.handle_missing(y, X, missing='drop')
df = df.dropna()
y_exp, X_exp = df[df.columns[0]].values, df[df.columns[1:]]
ptesting.assert_frame_equal(data['exog'], X_exp)
np.testing.assert_array_equal(data['endog'], y_exp)
def test_noop(self):
df = ptesting.makeDataFrame()
df.values[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]], df[df.columns[1:]]
data, _ = sm_data.handle_missing(y, X, missing='none')
y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]]
ptesting.assert_frame_equal(data['exog'], X_exp)
ptesting.assert_series_equal(data['endog'], y_exp)
class CheckHasConstant(object):
def test_hasconst(self):
for x, result in zip(self.exogs, self.results):
mod = self.mod(self.y, x)
assert_equal(mod.k_constant, result[0]) #['k_constant'])
assert_equal(mod.data.k_constant, result[0])
if result[1] is None:
assert_(mod.data.const_idx is None)
else:
assert_equal(mod.data.const_idx, result[1])
# extra check after fit, some models raise on singular
fit_kwds = getattr(self, 'fit_kwds', {})
try:
res = mod.fit(**fit_kwds)
assert_equal(res.model.k_constant, result[0])
assert_equal(res.model.data.k_constant, result[0])
except:
pass
@classmethod
def setup_class(cls):
# create data
np.random.seed(0)
cls.y_c = np.random.randn(20)
cls.y_bin = (cls.y_c > 0).astype(int)
x1 = np.column_stack((np.ones(20), np.zeros(20)))
result1 = (1, 0)
x2 = np.column_stack((np.arange(20) < 10.5,
np.arange(20) > 10.5)).astype(float)
result2 = (1, None)
        x3 = np.column_stack((np.arange(20), np.zeros(20)))
result3 = (0, None)
        x4 = np.column_stack((np.arange(20), np.zeros((20, 2))))
result4 = (0, None)
x5 = np.column_stack((np.zeros(20), 0.5 * np.ones(20)))
result5 = (1, 1)
x5b = np.column_stack((np.arange(20), np.ones((20, 3))))
result5b = (1, 1)
x5c = np.column_stack((np.arange(20), np.ones((20, 3)) * [0.5, 1, 1]))
result5c = (1, 2)
# implicit and zero column
x6 = np.column_stack((np.arange(20) < 10.5,
np.arange(20) > 10.5,
                              np.zeros(20))).astype(float)
result6 = (1, None)
x7 = np.column_stack((np.arange(20) < 10.5,
np.arange(20) > 10.5,
                              np.zeros((20, 2)))).astype(float)
result7 = (1, None)
cls.exogs = (x1, x2, x3, x4, x5, x5b, x5c, x6, x7)
cls.results = (result1, result2, result3, result4, result5, result5b,
result5c, result6, result7)
class TestHasConstantOLS(CheckHasConstant):
def __init__(self):
self.setup_class() # why does nose do it properly
from statsmodels.regression.linear_model import OLS
self.mod = OLS
self.y = self.y_c
class TestHasConstantGLM(CheckHasConstant):
def __init__(self):
self.setup_class() # why does nose do it properly
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
self.mod = lambda y, x : GLM(y, x, family=families.Binomial())
self.y = self.y_bin
class TestHasConstantLogit(CheckHasConstant):
def __init__(self):
self.setup_class() # why does nose do it properly
from statsmodels.discrete.discrete_model import Logit
self.mod = Logit
self.y = self.y_bin
self.fit_kwds = {'disp': False}
def test_dtype_object():
# see #880
X = np.random.random((40,2))
df = pandas.DataFrame(X)
df[2] = np.random.randint(2, size=40).astype('object')
df['constant'] = 1
y = pandas.Series(np.random.randint(2, size=40))
np.testing.assert_raises(ValueError, sm_data.handle_data, y, df)
def test_formula_missing_extra_arrays():
np.random.seed(1)
# because patsy can't turn off missing data-handling as of 0.3.0, we need
# separate tests to make sure that missing values are handled correctly
# when going through formulas
# there is a handle_formula_data step
# then there is the regular handle_data step
# see 2083
# the untested cases are endog/exog have missing. extra has missing.
# endog/exog are fine. extra has missing.
# endog/exog do or do not have missing and extra has wrong dimension
y = np.random.randn(10)
y_missing = y.copy()
y_missing[[2, 5]] = np.nan
X = np.random.randn(10)
X_missing = X.copy()
X_missing[[1, 3]] = np.nan
weights = np.random.uniform(size=10)
weights_missing = weights.copy()
weights_missing[[6]] = np.nan
weights_wrong_size = np.random.randn(12)
data = {'y': y,
'X': X,
'y_missing': y_missing,
'X_missing': X_missing,
'weights': weights,
'weights_missing': weights_missing}
data = pandas.DataFrame.from_dict(data)
data['constant'] = 1
formula = 'y_missing ~ X_missing'
((endog, exog),
missing_idx, design_info) = handle_formula_data(data, None, formula,
depth=2,
missing='drop')
kwargs = {'missing_idx': missing_idx, 'missing': 'drop',
'weights': data['weights_missing']}
model_data = sm_data.handle_data(endog, exog, **kwargs)
data_nona = data.dropna()
assert_equal(data_nona['y'].values, model_data.endog)
assert_equal(data_nona[['constant', 'X']].values, model_data.exog)
assert_equal(data_nona['weights'].values, model_data.weights)
tmp = handle_formula_data(data, None, formula, depth=2, missing='drop')
(endog, exog), missing_idx, design_info = tmp
weights_2d = np.random.randn(10, 10)
weights_2d[[8, 7], [7, 8]] = np.nan #symmetric missing values
kwargs.update({'weights': weights_2d,
'missing_idx': missing_idx})
model_data2 = sm_data.handle_data(endog, exog, **kwargs)
good_idx = [0, 4, 6, 9]
assert_equal(data.ix[good_idx, 'y'], model_data2.endog)
assert_equal(data.ix[good_idx, ['constant', 'X']], model_data2.exog)
assert_equal(weights_2d[good_idx][:, good_idx], model_data2.weights)
tmp = handle_formula_data(data, None, formula, depth=2, missing='drop')
(endog, exog), missing_idx, design_info = tmp
kwargs.update({'weights': weights_wrong_size,
'missing_idx': missing_idx})
assert_raises(ValueError, sm_data.handle_data, endog, exog, **kwargs)
if __name__ == "__main__":
import nose
#nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# exit=False)
nose.runmodule(argv=[__file__, '-vvs', '-x'], exit=False)
|
bsd-3-clause
|
mojoboss/scikit-learn
|
examples/svm/plot_svm_regression.py
|
249
|
1451
|
"""
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
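# One quick way to compare the three fits numerically would be the default
# R^2 score of each fitted estimator on the training data:
#
#   for name, model in [('RBF', svr_rbf), ('Linear', svr_lin), ('Poly', svr_poly)]:
#       print(name, model.score(X, y))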
|
bsd-3-clause
|
FCP-INDI/nipype
|
tools/run_examples.py
|
1
|
3315
|
from __future__ import print_function
import os
import sys
from shutil import rmtree, copyfile
from multiprocessing import cpu_count
def run_examples(example, pipelines, data_path, plugin=None):
'''
Run example workflows
'''
# Import packages
from nipype import config
from nipype.interfaces.base import CommandLine
from nipype.utils import draw_gantt_chart
from nipype.pipeline.plugins import log_nodes_cb
if plugin is None:
plugin = 'MultiProc'
print('running example: %s with plugin: %s' % (example, plugin))
config.enable_debug_mode()
config.enable_provenance()
CommandLine.set_default_terminal_output("stream")
plugin_args = {}
if plugin == 'MultiProc':
plugin_args['n_procs'] = cpu_count()
__import__(example)
for pipeline in pipelines:
# Init and run workflow
wf = getattr(sys.modules[example], pipeline)
wf.base_dir = os.path.join(os.getcwd(), 'output', example, plugin)
if os.path.exists(wf.base_dir):
rmtree(wf.base_dir)
# Handle a logging directory
log_dir = os.path.join(os.getcwd(), 'logs', example)
if os.path.exists(log_dir):
rmtree(log_dir)
os.makedirs(log_dir)
wf.config = {'execution': {'hash_method': 'timestamp',
'stop_on_first_rerun': 'true',
'write_provenance': 'true'}}
# Callback log setup
if example == 'fmri_spm_nested' and plugin == 'MultiProc' and \
pipeline == 'l2pipeline':
# Init callback log
import logging
cb_log_path = os.path.join(os.path.expanduser('~'), 'callback.log')
cb_logger = logging.getLogger('callback')
cb_logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(cb_log_path)
cb_logger.addHandler(handler)
plugin_args = {'n_procs' : 4, 'status_callback' : log_nodes_cb}
else:
plugin_args = {'n_procs' : 4}
try:
wf.inputs.inputnode.in_data = os.path.abspath(data_path)
except AttributeError:
pass # the workflow does not have inputnode.in_data
wf.run(plugin=plugin, plugin_args=plugin_args)
# Draw gantt chart only if pandas is installed
try:
import pandas
pandas_flg = True
except ImportError as exc:
pandas_flg = False
        if 'status_callback' in plugin_args and pandas_flg:
draw_gantt_chart.generate_gantt_chart(cb_log_path, 4)
dst_log_html = os.path.join(os.path.expanduser('~'), 'callback.log.html')
copyfile(cb_log_path+'.html', dst_log_html)
if __name__ == '__main__':
path, file = os.path.split(__file__)
sys.path.insert(0, os.path.realpath(os.path.join(path, '..', 'examples')))
examples = {'fmri_fsl_reuse': ['level1_workflow'],
'fmri_spm_nested': ['level1', 'l2pipeline'],
# 'fmri_spm_dartel':['level1','l2pipeline'],
# 'fmri_fsl_feeds':['l1pipeline']
}
example = sys.argv[1]
plugin = sys.argv[2]
data_path = sys.argv[3]
pipelines = sys.argv[4:]
run_examples(example, pipelines, data_path, plugin)
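    # Example invocation (the data path is a placeholder; any example key
    # from the dict above works):
    #   python run_examples.py fmri_fsl_reuse MultiProc /path/to/data level1_workflow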
|
bsd-3-clause
|
cleverhans-lab/cleverhans
|
cleverhans/plot/success_fail.py
|
2
|
12713
|
"""Functions for plotting succes-failure curves
Reference: https://openreview.net/forum?id=H1g0piA9tQ
"""
import warnings
import numpy as np
from matplotlib import pyplot
from cleverhans.serial import load
from cleverhans.utils import safe_zip
LINEWIDTH = 2
DEFAULT_SUCCESS_NAME = "clean"
# This must be a tuple or it is not safe to use as a param default
DEFAULT_FAIL_NAMES = ("mc", "bundled")
def plot_report_from_path(
path,
success_name=DEFAULT_SUCCESS_NAME,
fail_names=DEFAULT_FAIL_NAMES,
label=None,
is_max_confidence=True,
linewidth=LINEWIDTH,
plot_upper_bound=True,
):
"""
Plots a success-fail curve from a confidence report stored on disk,
:param path: string filepath for the stored report.
(Should be the output of make_confidence_report*.py)
:param success_name: The name (confidence report key) of the data that
should be used to measure success rate
:param fail_names: A list of names (confidence report keys) of the data
that should be used to measure failure rate.
*Only one of these keys will be plotted*. Each key will be tried in
order until one is found in the report. This is to support both the
output of `make_confidence_report` and `make_confidence_report_bundled`.
:param label: Optional string. Name to use for this curve in the legend.
:param is_max_confidence: bool.
If True, when measuring the failure rate, treat the data as the output
of a maximum confidence attack procedure.
This means that the attack is optimal (assuming the underlying optimizer
is good enough, *which is probably false*, so interpret the plot
accordingly) for thresholds >= .5 but for lower thresholds the observed
failure rate is a lower bound on the true worst failure rate and the
observed coverage is an upper bound (assuming good enough optimization)
on the true failure rate.
The plot thus draws the threshold >= .5 portion of the curve with a solid
line and the upper and lower bounds with a dashed line.
See https://openreview.net/forum?id=H1g0piA9tQ for details.
If False, the attack procedure is regarded as an ad hoc way of obtaining
a loose lower bound, and thus the whole curve is drawn with dashed lines.
:param linewidth: thickness of the line to draw
:param plot_upper_bound: include upper bound on error rate in plot
"""
report = load(path)
plot_report(
report,
success_name,
fail_names,
label,
is_max_confidence,
linewidth,
plot_upper_bound,
)
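# A rough usage sketch (the report paths below are placeholders): overlay two
# stored reports on one set of axes, then display the figure.
#
#   plot_report_from_path('clean_report.joblib', label='baseline')
#   plot_report_from_path('adv_trained_report.joblib', label='adv trained')
#   pyplot.legend()
#   pyplot.show()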
def plot_report(
report,
success_name,
fail_names,
label=None,
is_max_confidence=True,
linewidth=LINEWIDTH,
plot_upper_bound=True,
):
"""
Plot a success fail curve from a confidence report
:param report: A confidence report
(the type of object saved by make_confidence_report.py)
:param success_name: see plot_report_from_path
:param fail_names: see plot_report_from_path
:param label: see plot_report_from_path
:param is_max_confidence: see plot_report_from_path
:param linewidth: see plot_report_from_path
"""
(
fail_optimal,
success_optimal,
fail_lower_bound,
fail_upper_bound,
success_bounded,
) = make_curve(report, success_name, fail_names)
assert len(fail_lower_bound) == len(fail_upper_bound)
fail_optimal = np.array(fail_optimal)
fail_lower_bound = np.array(fail_lower_bound)
fail_upper_bound = np.array(fail_upper_bound)
if is_max_confidence:
(p,) = pyplot.plot(
fail_optimal, success_optimal, label=label, linewidth=linewidth
)
color = p.get_color()
pyplot.plot(fail_lower_bound, success_bounded, "--", color=color)
if plot_upper_bound:
pyplot.plot(fail_upper_bound, success_bounded, "--", color=color)
else:
# If the attack was not MaxConfidence, then this whole curve is just
# a loose lower bound
all_fail = np.concatenate((fail_optimal, fail_lower_bound), axis=0)
pyplot.plot(
all_fail,
success_optimal + success_bounded,
"--",
label=label,
linewidth=linewidth,
)
pyplot.xlabel("Failure rate on adversarial examples")
pyplot.ylabel("Success rate on clean examples")
gap = fail_upper_bound - fail_lower_bound
if gap.size > 0:
assert gap.min() >= 0.0
print("Max gap: ", gap.max())
def make_curve(report, success_name, fail_names):
"""
Make a success-failure curve.
:param report: A confidence report
(the type of object saved by make_confidence_report.py)
:param success_name: see plot_report_from_path
:param fail_names: see plot_report_from_path
:returns:
fail_optimal: list of failure rates on adversarial data for the optimal
(t >= .5) part of the curve. Each entry corresponds to a different
threshold. Thresholds are chosen to make the smoothest possible curve
from the available data, e.g. one threshold between each unique
confidence value observed in the data. To make sure that linear
interpolation between points in the curve never overestimates the
failure rate for a specific success rate, the curve also includes
extra points that increment the failure rate prior to any point
that increments the success rate, so the curve moves up and to the
right in a series of backwards "L" shapes rather than moving up
and to the right along diagonal lines. For large datasets these
maximally pessimistic points will usually not be visible and the
curve will appear smooth.
success_optimal: list of success rates on clean data on the optimal
part of the curve. Matches up with `fail_optimal`.
fail_lower_bound: list of observed failure rates on the t < .5 portion
of the curve where MaxConfidence is not optimal.
fail_upper_bound: list of upper bounds (assuming good enough optimization,
so not a true upper bound) on the failure rates on the t < .5 portion
of the curve where MaxConfidence is not optimal. Matches up with
`fail_lower_bound`.
success_bounded: success rates on the non-optimal part of the curve.
Matches up with `fail_lower_bound` and `fail_upper_bound`.
"""
success_results = report[success_name]
fail_name = None # pacify pylint
found = False
for fail_name in fail_names:
if fail_name in report:
found = True
break
if not found:
raise ValueError(
            fail_name + " not in report. Available keys: " + str(report.keys())
)
fail_results = report[fail_name]
# "good" means drawn from the distribution where we measure success rate.
# "bad" means drawn from the distribution where we measure failure rate.
# From here on out we use those terms, to avoid confusion between examples
# that actually failed and examples that were drawn from the distribution
# where we measured failure rate.
old_all_probs_version = False
if isinstance(success_results, dict):
# This dictionary key lookup will trigger a deprecation warning if `success_results` is not the old dictionary
# style of report, so we don't want to do a dictionary lookup unless we really are using the old version.
old_all_probs_version = "all_probs" in success_results
if old_all_probs_version:
warnings.warn(
"The 'all_probs' key is included only to support "
" old files from a private development codebase. "
"Support for this key can be dropped at any time "
" without warning."
)
good_probs = success_results["all_probs"]
bad_probs = fail_results["all_probs"]
bad_corrects = fail_results["correctness_mask"]
good_corrects = success_results["correctness_mask"]
else:
if isinstance(success_results, dict):
# Still using dict, but using newer key names
warnings.warn(
"Support for dictionary confidence reports is deprecated. Switch to using the classes in "
"cleverhans.confidence_report. Support for old dictionary-style reports may be removed "
"on or after 2019-07-19."
)
good_probs = success_results["confidence"]
bad_probs = fail_results["confidence"]
good_corrects = success_results["correctness"]
bad_corrects = fail_results["correctness"]
else:
# current version
good_probs = success_results.confidence
bad_probs = fail_results.confidence
good_corrects = success_results.correctness
bad_corrects = fail_results.correctness
good_triplets = [
(prob, correct, True) for prob, correct in safe_zip(good_probs, good_corrects)
]
bad_triplets = [
(prob, correct, False) for prob, correct in safe_zip(bad_probs, bad_corrects)
]
total_good = len(good_triplets)
total_bad = len(bad_triplets)
if total_good != 10000:
warnings.warn(
"Not using full test set? Found "
+ str(total_good)
+ " examples for measuring success rate"
)
if total_bad != 10000:
warnings.warn("Not using full test set for adversarial examples?")
all_triplets = good_triplets + bad_triplets
all_triplets = sorted(all_triplets, key=lambda x: -x[0])
# Start with the case for threshold t = 1.
# Examples are covered only if prob > t (strict inequality)
# So initially nothing is covered
good_covered_and_correct = 0
bad_covered_and_incorrect = 0
    # Number of bad examples that are either covered and incorrect at some
    # threshold t >= 0.5, or merely covered at some threshold t < 0.5
failure_opportunities = 0
next_idx = 0
fail_optimal = []
success_optimal = []
fail_upper_bound = []
fail_lower_bound = []
success_bounded = []
bounded = False
# NOTE: the loop always exits via an internal break statement.
# Copied the termination condition to the while statement for ease
# of reading.
while next_idx < len(all_triplets):
gs = float(good_covered_and_correct) / total_good
bf = float(bad_covered_and_incorrect) / total_bad
# Add results for current threshold to the list
if not bounded:
            # Sometimes, when there are big jumps in the failure rate, this
            # makes artifacts in the plot, where there's a long linear track.
            # This falsely implies the real success-fail curve is linear when
            # actually that region just isn't sampled by the data.
# To avoid implying that the model reaches a higher success
# rate than it actually does, we avoid these plotting artifacts
# by introducing extra points that make the graph move horizontally
# to the right first, then vertically.
if len(fail_optimal) > 0:
prev_bf = fail_optimal[-1]
prev_gs = success_optimal[-1]
if gs > prev_gs and bf > prev_bf:
fail_optimal.append(bf)
success_optimal.append(prev_gs)
success_optimal.append(gs)
fail_optimal.append(bf)
else:
success_bounded.append(gs)
fail_lower_bound.append(bf)
fail_upper_bound.append(float(failure_opportunities) / total_bad)
if next_idx == len(all_triplets):
break
# next_prob_to_include is not quite the same thing as the threshold.
# The threshold is infinitesimally smaller than this value.
next_prob_to_include = all_triplets[next_idx][0]
# Process all ties
while next_prob_to_include == all_triplets[next_idx][0]:
_prob, correct, is_good = all_triplets[next_idx]
if is_good:
good_covered_and_correct += correct
else:
if next_prob_to_include <= 0.5:
failure_opportunities += 1
else:
failure_opportunities += 1 - correct
bad_covered_and_incorrect += 1 - correct
next_idx += 1
if next_idx == len(all_triplets):
break
if next_prob_to_include <= 0.5:
bounded = True
out = (
fail_optimal,
success_optimal,
fail_lower_bound,
fail_upper_bound,
success_bounded,
)
return out
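# Illustrative usage sketch (not part of the original module): overlay the
# success-fail curves of two confidence reports for comparison. The file names
# and the report keys ("clean", "mc") below are hypothetical placeholders;
# substitute the keys actually present in your reports.
def _example_plot_two_reports():
    plot_report_from_path("model_a_report.joblib", "clean", ["mc"],
                          label="model A")
    plot_report_from_path("model_b_report.joblib", "clean", ["mc"],
                          label="model B")
    pyplot.legend()
    pyplot.show()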
|
mit
|
spallavolu/scikit-learn
|
sklearn/linear_model/ransac.py
|
191
|
14261
|
# coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
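# Worked example (illustrative, not part of the original module): with a 50%
# inlier ratio, min_samples=2 and the default probability of 0.99,
#     _dynamic_max_trials(50, 100, 2, 0.99)
#       = ceil(log(1 - 0.99) / log(1 - 0.5 ** 2))
#       = ceil(-4.605 / -0.288)
#       = 17
# i.e. roughly 17 random subsets are needed before one of them is outlier-free
# with 99% confidence.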
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <RansacRegression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
         * `score(X, y)`: Returns the score of the model on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
        relative number ``ceil(min_samples * X.shape[0])`` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
        Stop iteration if score is greater than or equal to this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
        data is sampled in RANSAC. This requires generating at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
        where the probability (confidence) is typically set to a high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
        try:  # Not all estimators accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = residual_metric(diff)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
            # fewer inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
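# Illustrative usage sketch (not part of the original module): fit a
# RANSACRegressor on data containing a few gross outliers and inspect the
# inlier mask. All names below (rng, X_demo, y_demo) are made up for the
# example.
def _example_ransac_usage():
    rng = np.random.RandomState(0)
    X_demo = rng.normal(size=(100, 1))
    y_demo = 3.0 * X_demo.ravel() + rng.normal(scale=0.1, size=100)
    y_demo[:5] += 50.0  # inject a handful of gross outliers
    ransac = RANSACRegressor(random_state=0)
    ransac.fit(X_demo, y_demo)
    # the injected outliers should be excluded from the consensus set
    assert ransac.inlier_mask_.sum() >= 90
    return ransac.predict(X_demo[:3])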
|
bsd-3-clause
|
sonnyhu/scikit-learn
|
sklearn/cluster/tests/test_k_means.py
|
5
|
31623
|
"""Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.exceptions import DataConversionWarning
from sklearn.metrics.cluster import homogeneity_score
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_elkan_results():
rnd = np.random.RandomState(0)
X_normal = rnd.normal(size=(50, 10))
X_blobs, _ = make_blobs(random_state=0)
km_full = KMeans(algorithm='full', n_clusters=5, random_state=0, n_init=1)
km_elkan = KMeans(algorithm='elkan', n_clusters=5,
random_state=0, n_init=1)
for X in [X_normal, X_blobs]:
km_full.fit(X)
km_elkan.fit(X)
assert_array_almost_equal(km_elkan.cluster_centers_,
km_full.cluster_centers_)
assert_array_equal(km_elkan.labels_, km_full.labels_)
def test_labels_assignment_and_inertia():
    # pure numpy implementation as an easily auditable reference ("gold")
    # implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
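# Illustrative sketch (not part of the original test module): row_norms with
# squared=True is just a faster way of computing (X ** 2).sum(axis=1), which
# is why the dense and sparse code paths above can be checked against the same
# gold values.
def _example_row_norms_equivalence():
    assert_array_almost_equal(row_norms(X, squared=True),
                              (X ** 2).sum(axis=1))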
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
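# Illustrative sketch (not part of the original test module): v_measure_score
# is invariant to label permutations, which is why _check_fitted_model can
# require a perfect score of 1.0 without caring which integer id each cluster
# happened to receive.
def _example_v_measure_is_permutation_invariant():
    labels_a = np.array([0, 0, 1, 1, 2, 2])
    labels_b = np.array([2, 2, 0, 0, 1, 1])  # same partition, relabeled
    assert_equal(v_measure_score(labels_a, labels_b), 1.0)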
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
if sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that a warning is raised if the precompute_distances flag is not
# supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_explicit_init_shape():
# test for sensible errors when giving explicit init
# with wrong number of features or clusters
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 3))
for Class in [KMeans, MiniBatchKMeans]:
# mismatch of number of features
km = Class(n_init=1, init=X[:, :2], n_clusters=len(X))
msg = "does not match the number of features of the data"
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1,
init=lambda X_, k, random_state: X_[:, :2],
n_clusters=len(X))
assert_raises_regex(ValueError, msg, km.fit, X)
# mismatch of number of clusters
msg = "does not match the number of clusters"
km = Class(n_init=1, init=X[:2, :], n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1,
init=lambda X_, k, random_state: X_[:2, :],
n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
def test_k_means_fortran_aligned_data():
    # Check that KMeans works well even if X is Fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42, n_clusters=2)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
    # should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
msg = "does not match the number of clusters"
assert_raises_regex(ValueError, msg, MiniBatchKMeans(init=test_init,
random_state=42).fit,
X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
    # check that my_X was de-centered back to the original values
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
    # Check that k_means with a bad initialization does not yield a singleton.
    # Starting with bad centers that are quickly ignored should not
    # result in a repositioning of the centers to the center of mass that
    # would lead to collapsed centers, which in turn makes the clustering
    # dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1,
algorithm='elkan')
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1,
algorithm='elkan')
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_int_input():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
for dtype in [np.int32, np.int64]:
X_int = np.array(X_list, dtype=dtype)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
for km in fitted_models:
assert_equal(km.cluster_centers_.dtype, np.float64)
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_predict_equal_labels():
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,
algorithm='full')
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,
algorithm='elkan')
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
def test_full_vs_elkan():
km1 = KMeans(algorithm='full', random_state=13)
km2 = KMeans(algorithm='elkan', random_state=13)
km1.fit(X)
km2.fit(X)
    assert_equal(homogeneity_score(km1.predict(X), km2.predict(X)), 1.0)
def test_n_init():
# Check that increasing the number of init increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
    centers = cluster_centers
    assert_equal(centers.shape, (n_clusters, n_features))
    assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
"""Test that x_squared_norms can be None in _init_centroids"""
from sklearn.cluster.k_means_ import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
def test_max_iter_error():
km = KMeans(max_iter=-1)
assert_raise_message(ValueError, 'Number of iterations should be',
km.fit, X)
def test_float_precision():
km = KMeans(n_init=1, random_state=30)
mb_km = MiniBatchKMeans(n_init=1, random_state=30)
inertia = {}
X_new = {}
centers = {}
for estimator in [km, mb_km]:
for is_sparse in [False, True]:
for dtype in [np.float64, np.float32]:
if is_sparse:
X_test = sp.csr_matrix(X_csr, dtype=dtype)
else:
X_test = dtype(X)
estimator.fit(X_test)
# dtype of cluster centers has to be the dtype of the input data
assert_equal(estimator.cluster_centers_.dtype, dtype)
inertia[dtype] = estimator.inertia_
X_new[dtype] = estimator.transform(X_test)
centers[dtype] = estimator.cluster_centers_
# make sure predictions correspond to the correct label
assert_equal(estimator.predict(X_test[0]), estimator.labels_[0])
if hasattr(estimator, 'partial_fit'):
estimator.partial_fit(X_test[0:3])
# dtype of cluster centers has to stay the same after partial_fit
assert_equal(estimator.cluster_centers_.dtype, dtype)
    # compare arrays with low precision, since 32-bit and 64-bit computations
    # can differ by up to the 4th decimal place
assert_array_almost_equal(inertia[np.float32], inertia[np.float64],
decimal=4)
assert_array_almost_equal(X_new[np.float32], X_new[np.float64],
decimal=4)
assert_array_almost_equal(centers[np.float32], centers[np.float64],
decimal=4)
def test_KMeans_init_centers():
    # Check that KMeans does not silently mutate the user-provided init array,
    # even if the input data and the init centers have the same dtype
X_small = np.array([[1.1, 1.1], [-7.5, -7.5], [-1.1, -1.1], [7.5, 7.5]])
init_centers = np.array([[0.0, 0.0], [5.0, 5.0], [-5.0, -5.0]])
for dtype in [np.int32, np.int64, np.float32, np.float64]:
X_test = dtype(X_small)
init_centers_test = dtype(init_centers)
assert_array_equal(init_centers, init_centers_test)
km = KMeans(init=init_centers_test, n_clusters=3)
km.fit(X_test)
assert_equal(False, np.may_share_memory(km.cluster_centers_, init_centers))
|
bsd-3-clause
|
trankmichael/scikit-learn
|
examples/cluster/plot_mean_shift.py
|
351
|
1793
|
"""
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
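# Illustrative addition (not part of the original example): a fitted MeanShift
# estimator can also assign previously unseen points to the nearest discovered
# cluster center via predict. The coordinates below are arbitrary.
new_points = np.array([[0.9, 1.1], [-1.2, -0.8]])
print("labels for new points: %s" % ms.predict(new_points))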
|
bsd-3-clause
|
hsuantien/scikit-learn
|
sklearn/neighbors/tests/test_dist_metrics.py
|
230
|
5234
|
import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
    # Check that DistanceMetric objects initialized from both a callable
    # metric and a predefined metric are picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
|
bsd-3-clause
|
DGrady/pandas
|
pandas/tests/io/test_packers.py
|
7
|
31902
|
import pytest
from warnings import catch_warnings
import os
import datetime
import numpy as np
import sys
from distutils.version import LooseVersion
from pandas import compat
from pandas.compat import u, PY3
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, period_range, Index, Categorical)
from pandas.errors import PerformanceWarning
from pandas.io.packers import to_msgpack, read_msgpack
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_categorical_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
patch)
from pandas.tests.test_panel import assert_panel_equal
import pandas
from pandas import Timestamp, NaT
from pandas._libs.tslib import iNaT
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
@pytest.fixture(scope='module')
def current_packers_data():
# our current version packers data
from pandas.tests.io.generate_legacy_storage_files import (
create_msgpack_data)
return create_msgpack_data()
@pytest.fixture(scope='module')
def all_packers_data():
    # all of our current version packers data
from pandas.tests.io.generate_legacy_storage_files import (
create_data)
return create_data()
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
elif isinstance(a, Categorical):
# Temp,
# Categorical.categories is changed from str to bytes in PY3
# maybe the same as GH 13591
if PY3 and b.categories.inferred_type == 'string':
pass
else:
tm.assert_categorical_equal(a, b)
elif a is NaT:
assert b is NaT
elif isinstance(a, Timestamp):
assert a == b
assert a.freq == b.freq
else:
assert(a == b)
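# Illustrative sketch (not part of the original test module): check_arbitrary
# recurses through nested lists/tuples and dispatches each element to the
# matching pandas equality helper.
def _example_check_arbitrary():
    left = [Series([1, 2, 3]), DataFrame({'a': [1.0, 2.0]})]
    right = [Series([1, 2, 3]), DataFrame({'a': [1.0, 2.0]})]
    check_arbitrary(left, right)  # raises an AssertionError on any mismatch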
class TestPackers(object):
def setup_method(self, method):
self.path = '__%s__.msg' % tm.rands(10)
def teardown_method(self, method):
pass
def encode_decode(self, x, compress=None, **kwargs):
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10, 2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(compat.BytesIO(s))
tm.assert_frame_equal(result, df)
s = to_msgpack(None, df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
fh = open(p, 'wb')
fh.write(s)
fh.close()
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_msgpack, read_msgpack)
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(df.to_msgpack, read_msgpack)
tm.assert_frame_equal(df, result)
def test_iterator_with_string_io(self):
dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)]
s = to_msgpack(None, *dfs)
for i, result in enumerate(read_msgpack(s, iterator=True)):
tm.assert_frame_equal(result, dfs[i])
def test_invalid_arg(self):
# GH10369
class A(object):
def __init__(self):
self.read = 0
pytest.raises(ValueError, read_msgpack, path_or_buf=None)
pytest.raises(ValueError, read_msgpack, path_or_buf={})
pytest.raises(ValueError, read_msgpack, path_or_buf=A())
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, 'complex128'):
pytest.skip('numpy cant handle complex128')
x = [np.float32(np.random.rand()) for i in range(5)] + \
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_complex(self):
x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
x_rec = self.encode_decode(x)
tm.assert_dict_equal(x, x_rec)
for key in x:
tm.assert_class_equal(x[key], x_rec[key], obj="complex value")
def test_dict_numpy_float(self):
x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_numpy_complex(self):
x = {'foo': np.complex128(1.0 + 1.0j),
'bar': np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
tm.assert_dict_equal(x, x_rec)
for key in x:
tm.assert_class_equal(x[key], x_rec[key], obj="numpy complex128")
def test_numpy_array_float(self):
# run multiple times
for n in range(10):
x = np.random.rand(10)
for dtype in ['float32', 'float64']:
x = x.astype(dtype)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
assert (all(map(lambda x, y: x == y, x, x_rec)) and
x.dtype == x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
class TestBasic(TestPackers):
def test_timestamp(self):
for i in [Timestamp(
'20130101'), Timestamp('20130101', tz='US/Eastern'),
Timestamp('201301010501')]:
i_rec = self.encode_decode(i)
assert i == i_rec
def test_nat(self):
nat_rec = self.encode_decode(NaT)
assert NaT is nat_rec
def test_datetimes(self):
# fails under 2.6/win32 (np.datetime64 seems broken)
if LooseVersion(sys.version) < '2.7':
pytest.skip('2.6 with np.datetime64 is broken')
for i in [datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 1, 5, 1),
datetime.date(2013, 1, 1),
np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
i_rec = self.encode_decode(i)
assert i == i_rec
def test_timedeltas(self):
for i in [datetime.timedelta(days=1),
datetime.timedelta(days=1, seconds=10),
np.timedelta64(1000000)]:
i_rec = self.encode_decode(i)
assert i == i_rec
class TestIndex(TestPackers):
def setup_method(self, method):
super(TestIndex, self).setup_method(method)
self.d = {
'string': tm.makeStringIndex(100),
'date': tm.makeDateIndex(100),
'int': tm.makeIntIndex(100),
'rng': tm.makeRangeIndex(100),
'float': tm.makeFloatIndex(100),
'empty': Index([]),
'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
'period': Index(period_range('2012-1-1', freq='M', periods=3)),
'date2': Index(date_range('2013-01-1', periods=10)),
'bdate': Index(bdate_range('2013-01-02', periods=10)),
'cat': tm.makeCategoricalIndex(100)
}
self.mi = {
'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),
('foo', 'two'),
('qux', 'one'), ('qux', 'two')],
names=['first', 'second']),
}
def test_basic_index(self):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
# datetime with no freq (GH5506)
i = Index([Timestamp('20130101'), Timestamp('20130103')])
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
# datetime with timezone
i = Index([Timestamp('20130101 9:00:00'), Timestamp(
'20130103 11:00:00')]).tz_localize('US/Eastern')
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
def test_multi_index(self):
for s, i in self.mi.items():
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
def test_unicode(self):
i = tm.makeUnicodeIndex(100)
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
def test_categorical_index(self):
# GH15487
df = DataFrame(np.random.randn(10, 2))
df = df.astype({0: 'category'}).set_index(0)
result = self.encode_decode(df)
tm.assert_frame_equal(result, df)
class TestSeries(TestPackers):
def setup_method(self, method):
super(TestSeries, self).setup_method(method)
self.d = {}
s = tm.makeStringSeries()
s.name = 'string'
self.d['string'] = s
s = tm.makeObjectSeries()
s.name = 'object'
self.d['object'] = s
s = Series(iNaT, dtype='M8[ns]', index=range(5))
self.d['date'] = s
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 2 +
[Timestamp('20130603', tz='CET')] * 3,
'G': [Timestamp('20130102', tz='US/Eastern')] * 5,
'H': Categorical([1, 2, 3, 4, 5]),
'I': Categorical([1, 2, 3, 4, 5], ordered=True),
}
self.d['float'] = Series(data['A'])
self.d['int'] = Series(data['B'])
self.d['mixed'] = Series(data['E'])
self.d['dt_tz_mixed'] = Series(data['F'])
self.d['dt_tz'] = Series(data['G'])
self.d['cat_ordered'] = Series(data['H'])
self.d['cat_unordered'] = Series(data['I'])
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_series_equal(i, i_rec)
class TestCategorical(TestPackers):
def setup_method(self, method):
super(TestCategorical, self).setup_method(method)
self.d = {}
self.d['plain_str'] = Categorical(['a', 'b', 'c', 'd', 'e'])
self.d['plain_str_ordered'] = Categorical(['a', 'b', 'c', 'd', 'e'],
ordered=True)
self.d['plain_int'] = Categorical([5, 6, 7, 8])
self.d['plain_int_ordered'] = Categorical([5, 6, 7, 8], ordered=True)
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_categorical_equal(i, i_rec)
class TestNDFrame(TestPackers):
def setup_method(self, method):
super(TestNDFrame, self).setup_method(method)
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 5,
'G': [Timestamp('20130603', tz='CET')] * 5,
'H': Categorical(['a', 'b', 'c', 'd', 'e']),
'I': Categorical(['a', 'b', 'c', 'd', 'e'], ordered=True),
}
self.frame = {
'float': DataFrame(dict(A=data['A'], B=Series(data['A']) + 1)),
'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)),
'mixed': DataFrame(data)}
with catch_warnings(record=True):
self.panel = {
'float': Panel(dict(ItemA=self.frame['float'],
ItemB=self.frame['float'] + 1))}
def test_basic_frame(self):
for s, i in self.frame.items():
i_rec = self.encode_decode(i)
assert_frame_equal(i, i_rec)
def test_basic_panel(self):
with catch_warnings(record=True):
for s, i in self.panel.items():
i_rec = self.encode_decode(i)
assert_panel_equal(i, i_rec)
def test_multi(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
l = tuple([self.frame['float'], self.frame['float'].A,
self.frame['float'].B, None])
l_rec = self.encode_decode(l)
check_arbitrary(l, l_rec)
# this is an oddity in that packed lists will be returned as tuples
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
l_rec = self.encode_decode(l)
assert isinstance(l_rec, tuple)
check_arbitrary(l, l_rec)
def test_iterator(self):
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
with ensure_clean(self.path) as path:
to_msgpack(path, *l)
for i, packed in enumerate(read_msgpack(path, iterator=True)):
check_arbitrary(packed, l[i])
def tests_datetimeindex_freq_issue(self):
# GH 5947
# inferring freq on the datetimeindex
df = DataFrame([1, 2, 3], index=date_range('1/1/2013', '1/3/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
df = DataFrame([1, 2], index=date_range('1/1/2013', '1/2/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
def test_dataframe_duplicate_column_names(self):
# GH 9618
expected_1 = DataFrame(columns=['a', 'a'])
expected_2 = DataFrame(columns=[1] * 100)
expected_2.loc[0] = np.random.randn(100)
expected_3 = DataFrame(columns=[1, 1])
expected_3.loc[0] = ['abc', np.nan]
result_1 = self.encode_decode(expected_1)
result_2 = self.encode_decode(expected_2)
result_3 = self.encode_decode(expected_3)
assert_frame_equal(result_1, expected_1)
assert_frame_equal(result_2, expected_2)
assert_frame_equal(result_3, expected_3)
class TestSparse(TestPackers):
def _check_roundtrip(self, obj, comparator, **kwargs):
# currently these are not implemented
# i_rec = self.encode_decode(obj)
# comparator(obj, i_rec, **kwargs)
pytest.raises(NotImplementedError, self.encode_decode, obj)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.loc[3:5, 1:3] = np.nan
s.loc[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
class TestCompression(TestPackers):
"""See https://github.com/pandas-dev/pandas/pull/9783
"""
def setup_method(self, method):
try:
from sqlalchemy import create_engine
self._create_sql_engine = create_engine
except ImportError:
self._SQLALCHEMY_INSTALLED = False
else:
self._SQLALCHEMY_INSTALLED = True
super(TestCompression, self).setup_method(method)
data = {
'A': np.arange(1000, dtype=np.float64),
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
}
self.frame = {
'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])),
'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])),
'mixed': DataFrame(data),
}
def test_plain(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
def _test_compression(self, compress):
i_rec = self.encode_decode(self.frame, compress=compress)
for k in self.frame.keys():
value = i_rec[k]
expected = self.frame[k]
assert_frame_equal(value, expected)
# make sure that we can write to the new frames
for block in value._data.blocks:
assert block.values.flags.writeable
def test_compression_zlib(self):
if not _ZLIB_INSTALLED:
pytest.skip('no zlib')
self._test_compression('zlib')
def test_compression_blosc(self):
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
self._test_compression('blosc')
def _test_compression_warns_when_decompress_caches(self, compress):
not_garbage = []
control = [] # copied data
compress_module = globals()[compress]
real_decompress = compress_module.decompress
def decompress(ob):
"""mock decompress function that delegates to the real
decompress but caches the result and a copy of the result.
"""
res = real_decompress(ob)
not_garbage.append(res) # hold a reference to this bytes object
control.append(bytearray(res)) # copy the data here to check later
return res
# types mapped to values to add in place.
rhs = {
np.dtype('float64'): 1.0,
np.dtype('int32'): 1,
np.dtype('object'): 'a',
np.dtype('datetime64[ns]'): np.timedelta64(1, 'ns'),
np.dtype('timedelta64[ns]'): np.timedelta64(1, 'ns'),
}
with patch(compress_module, 'decompress', decompress), \
tm.assert_produces_warning(PerformanceWarning) as ws:
i_rec = self.encode_decode(self.frame, compress=compress)
for k in self.frame.keys():
value = i_rec[k]
expected = self.frame[k]
assert_frame_equal(value, expected)
# make sure that we can write to the new frames even though
# we needed to copy the data
for block in value._data.blocks:
assert block.values.flags.writeable
# mutate the data in some way
block.values[0] += rhs[block.dtype]
for w in ws:
# check the messages from our warnings
assert str(w.message) == ('copying data after decompressing; '
'this may mean that decompress is '
'caching its result')
for buf, control_buf in zip(not_garbage, control):
# make sure none of our mutations above affected the
# original buffers
assert buf == control_buf
def test_compression_warns_when_decompress_caches_zlib(self):
if not _ZLIB_INSTALLED:
pytest.skip('no zlib')
self._test_compression_warns_when_decompress_caches('zlib')
def test_compression_warns_when_decompress_caches_blosc(self):
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
self._test_compression_warns_when_decompress_caches('blosc')
def _test_small_strings_no_warn(self, compress):
empty = np.array([], dtype='uint8')
with tm.assert_produces_warning(None):
empty_unpacked = self.encode_decode(empty, compress=compress)
tm.assert_numpy_array_equal(empty_unpacked, empty)
assert empty_unpacked.flags.writeable
char = np.array([ord(b'a')], dtype='uint8')
with tm.assert_produces_warning(None):
char_unpacked = self.encode_decode(char, compress=compress)
tm.assert_numpy_array_equal(char_unpacked, char)
assert char_unpacked.flags.writeable
# if this test fails I am sorry because the interpreter is now in a
# bad state where b'a' points to 98 == ord(b'b').
char_unpacked[0] = ord(b'b')
# we compare the ord of bytes b'a' with unicode u'a' because they should
# always be the same (unless we were able to mutate the shared
# character singleton, in which case ord(b'a') == ord(b'b')).
assert ord(b'a') == ord(u'a')
tm.assert_numpy_array_equal(
char_unpacked,
np.array([ord(b'b')], dtype='uint8'),
)
def test_small_strings_no_warn_zlib(self):
if not _ZLIB_INSTALLED:
pytest.skip('no zlib')
self._test_small_strings_no_warn('zlib')
def test_small_strings_no_warn_blosc(self):
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
self._test_small_strings_no_warn('blosc')
def test_readonly_axis_blosc(self):
# GH11880
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
df1 = DataFrame({'A': list('abcd')})
df2 = DataFrame(df1, index=[1., 2., 3., 4.])
assert 1 in self.encode_decode(df1['A'], compress='blosc')
assert 1. in self.encode_decode(df2['A'], compress='blosc')
def test_readonly_axis_zlib(self):
# GH11880
df1 = DataFrame({'A': list('abcd')})
df2 = DataFrame(df1, index=[1., 2., 3., 4.])
assert 1 in self.encode_decode(df1['A'], compress='zlib')
assert 1. in self.encode_decode(df2['A'], compress='zlib')
def test_readonly_axis_blosc_to_sql(self):
# GH11880
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
if not self._SQLALCHEMY_INSTALLED:
pytest.skip('no sqlalchemy')
expected = DataFrame({'A': list('abcd')})
df = self.encode_decode(expected, compress='blosc')
eng = self._create_sql_engine("sqlite:///:memory:")
df.to_sql('test', eng, if_exists='append')
result = pandas.read_sql_table('test', eng, index_col='index')
result.index.names = [None]
assert_frame_equal(expected, result)
def test_readonly_axis_zlib_to_sql(self):
# GH11880
if not _ZLIB_INSTALLED:
pytest.skip('no zlib')
if not self._SQLALCHEMY_INSTALLED:
pytest.skip('no sqlalchemy')
expected = DataFrame({'A': list('abcd')})
df = self.encode_decode(expected, compress='zlib')
eng = self._create_sql_engine("sqlite:///:memory:")
df.to_sql('test', eng, if_exists='append')
result = pandas.read_sql_table('test', eng, index_col='index')
result.index.names = [None]
assert_frame_equal(expected, result)
class TestEncoding(TestPackers):
def setup_method(self, method):
super(TestEncoding, self).setup_method(method)
data = {
'A': [compat.u('\u2019')] * 1000,
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
'G': [400] * 1000
}
self.frame = {
'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])),
'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])),
'mixed': DataFrame(data),
}
self.utf_encodings = ['utf8', 'utf16', 'utf32']
def test_utf(self):
# GH10581
for encoding in self.utf_encodings:
for frame in compat.itervalues(self.frame):
result = self.encode_decode(frame, encoding=encoding)
assert_frame_equal(result, frame)
def test_default_encoding(self):
for frame in compat.itervalues(self.frame):
result = frame.to_msgpack()
expected = frame.to_msgpack(encoding='utf8')
assert result == expected
result = self.encode_decode(frame)
assert_frame_equal(result, frame)
def legacy_packers_versions():
# yield the packers versions
path = tm.get_data_path('legacy_msgpack')
for v in os.listdir(path):
p = os.path.join(path, v)
if os.path.isdir(p):
yield v
class TestMsgpack(object):
"""
How to add msgpack tests:
1. Install the pandas version intended to output the msgpack.
2. Execute "generate_legacy_storage_files.py" to create the msgpack.
$ python generate_legacy_storage_files.py <output_dir> msgpack
3. Move the created msgpack file to the "data/legacy_msgpack/<version>" directory.
"""
minimum_structure = {'series': ['float', 'int', 'mixed',
'ts', 'mi', 'dup'],
'frame': ['float', 'int', 'mixed', 'mi'],
'panel': ['float'],
'index': ['int', 'date', 'period'],
'mi': ['reg2']}
def check_min_structure(self, data, version):
for typ, v in self.minimum_structure.items():
assert typ in data, '"{0}" not found in unpacked data'.format(typ)
for kind in v:
msg = '"{0}" not found in data["{1}"]'.format(kind, typ)
assert kind in data[typ], msg
def compare(self, current_data, all_data, vf, version):
# GH12277 encoding default used to be latin-1, now utf-8
if LooseVersion(version) < '0.18.0':
data = read_msgpack(vf, encoding='latin-1')
else:
data = read_msgpack(vf)
self.check_min_structure(data, version)
for typ, dv in data.items():
assert typ in all_data, ('unpacked data contains '
'extra key "{0}"'
.format(typ))
for dt, result in dv.items():
assert dt in current_data[typ], ('data["{0}"] contains extra '
'key "{1}"'.format(typ, dt))
try:
expected = current_data[typ][dt]
except KeyError:
continue
# use a specific comparator
# if available
comp_method = "compare_{typ}_{dt}".format(typ=typ, dt=dt)
comparator = getattr(self, comp_method, None)
if comparator is not None:
comparator(result, expected, typ, version)
else:
check_arbitrary(result, expected)
return data
def compare_series_dt_tz(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('version', legacy_packers_versions())
def test_msgpacks_legacy(self, current_packers_data, all_packers_data,
version):
pth = tm.get_data_path('legacy_msgpack/{0}'.format(version))
n = 0
for f in os.listdir(pth):
# GH12142 0.17 files packed in P2 can't be read in P3
if (compat.PY3 and version.startswith('0.17.') and
f.split('.')[-4][-1] == '2'):
continue
vf = os.path.join(pth, f)
try:
with catch_warnings(record=True):
self.compare(current_packers_data, all_packers_data,
vf, version)
except ImportError:
# blosc not installed
continue
n += 1
assert n > 0, 'Msgpack files are not tested'
|
bsd-3-clause
|
JanetMatsen/bacteriopop
|
dynamic_mode_decomposition.py
|
1
|
10700
|
# Dynamic Mode Decomposition based on http://arxiv.org/pdf/1312.0041v1.pdf
import numpy as np
import pandas as pd
from sklearn.preprocessing import normalize
from bacteriopop_utils import prepare_DMD_matrices
def find_fixed_adjacency_matrix(min_abundance=0.0, phylo_column='order',
full_svd=True):
"""
This function finds the adjacency matrix among clusters of bacteria over
the 11 weeks of sampling, assuming the interaction between clusters is
fixed.
It creates a dictionary of descriptive tuples like ("High", 2) for
high-oxygen replicate 2, and corresponding dataframe values. These
dataframes have weeks as columns and taxa ("bacteria") as rows.
Unlike find_temporal_adjacency_matrix(), we get only one predictive
matrix that represents the 10 transitions between sampling points.
Since the dictionary has 8 tuple keys for High/Low oxygen and 4
replicates for each condition, 8 interaction ("A") matrices are created.
These are accessed by the dictionary linear_mappings, with the same
tuples as keys.
The names of each node can be accessed by nodes_list, the other output.
:param min_abundance: minimum abundance to look for in original data
:param phylo_column: most detailed phylogenetic column to consider
:param full_svd: if True, runs the full svd algorithm. If False,
runs a faster version.
"""
# Default values
if min_abundance is None:
min_abundance = 0
if phylo_column is None:
phylo_column = 'order'
if full_svd is None:
full_svd = False
# snapshots of samples over 11 weeks
snapshots = prepare_DMD_matrices(min_abundance, phylo_column, oxygen='all', debug=False)
linear_mappings = {}
nodes_list = {}
for descriptive_tuple in snapshots.keys():
df = snapshots[descriptive_tuple]
data = df.values
X = data[:, 0:10]
Y = data[:, 1:11]
# Preprocess the abundance data
X = normalize(X, axis=0)
Y = normalize(Y, axis=0)
U, s, V = np.linalg.svd(X, full_matrices=full_svd)
if full_svd is True: # slower
S = np.zeros((len(U), len(s)), dtype=float)
S[:len(s), :len(s)] = np.diag(s)
pseu_inv_x = np.dot(np.linalg.inv(V),
np.dot(np.linalg.pinv(S), np.linalg.inv(U)))
else: # faster
S = np.diag(s)
pseu_inv_x = np.dot(np.linalg.inv(V),
np.dot(np.linalg.inv(S), np.linalg.pinv(U)))
# Adjacency matrix between clusters
A = np.dot(Y, pseu_inv_x)
# A = np.dot(Y, np.linalg.pinv(X)) # full SVD (slower)
linear_mappings[descriptive_tuple] = A
nodes_list[descriptive_tuple] = list(df.index)
return linear_mappings, nodes_list
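# Illustrative sketch only (not called by the pipeline above): the same
# "fit one fixed linear map from consecutive snapshots" idea on synthetic
# data, using np.linalg.pinv directly. The function name and variables are
# hypothetical and exist purely for illustration.
def _dmd_linear_map_example():
    rng = np.random.RandomState(0)
    true_map = rng.rand(4, 4)
    snapshots = [rng.rand(4, 1)]
    for _ in range(10):
        # each snapshot is the previous one pushed through the fixed map
        snapshots.append(np.dot(true_map, snapshots[-1]))
    data = np.hstack(snapshots)          # 4 "taxa" x 11 time points
    X, Y = data[:, 0:10], data[:, 1:11]  # consecutive snapshot pairs
    # least-squares estimate of the map A such that Y ~= A X
    estimated_map = np.dot(Y, np.linalg.pinv(X))
    return true_map, estimated_map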
def adjacency_matrix_into_pandas(mappings_array, row_and_colnames):
"""
Turn one matrix with one set of labels into a Pandas DataFrame with
index (row) names set to row_and_colnames as well has column names set
to row_and_colnames.
:param mappings_array: numpy matrix produced from ___
:param row_and_colnames: numpy array of names produced by ___
:return: one Pandas DataFrame with row and column names.
"""
# Goal: return a Pandas DataFrame with suitable labels by combining the
# linear_mappings and nodes_list outputs of find_fixed_adjacency_matrix().
# todo: which labels do we use? So far labels are things like:
# Bacteria,Proteobacteria,Gammaproteobacteria,Pseudomonadales
# and sometimes
# unassigned,,, <-- when the taxonomy was not fully specified.
# for now just return the long strings:
return pd.DataFrame(mappings_array,
columns=row_and_colnames,
index=row_and_colnames)
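# Minimal usage sketch (values below are placeholders, not pipeline output):
#   A = np.eye(2)
#   labels = ['Pseudomonadales', 'unassigned']
#   adjacency_matrix_into_pandas(A, labels)
# returns a 2x2 DataFrame whose index and columns are both set to `labels`.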
def DMD_results_dict_from_numpy_to_pandas(adj_dict, node_name_dict):
# transform our dict of descriptive tuple:numpy array pairs into a dict of
# descriptive tuple:pandas dataframe pairs.
# assert that the set of keys in both inputs match.
assert set(adj_dict.keys()) == set(node_name_dict.keys())
dict_with_dataframe_values = {}
for key in adj_dict.keys():
np_to_pd = adjacency_matrix_into_pandas(adj_dict[key],
node_name_dict[key])
dict_with_dataframe_values[key] = np_to_pd
return dict_with_dataframe_values
def find_temporal_adjacency_matrix(min_abundance, phylo_column, full_svd):
"""
Find the adjacency matrix among clusters of bacteria from week to week,
assuming the interaction between clusters is changing.
:param min_abundance: ignore the bacteria if their abundance is always
below the min_abundance
:param phylo_column: the data is clustered based on the phylo_column
:param full_svd: the method of singular value decomposition. Full SVD is
more accurate and slower than the reduced SVD
"""
# Default values
if min_abundance is None:
min_abundance = 0
if phylo_column is None:
phylo_column = 'family'
if full_svd is None:
full_svd = False
# snapshots of samples over 11 weeks
# todo: PEP 8 convention uses capitalized names for classes; consider
# lowercase names for the matrices below.
snapshots = prepare_DMD_matrices(min_abundance, phylo_column, oxygen='all', debug=False)
linear_mappings = {}
nodes_list = {}
for descriptive_tuple in snapshots.keys():
df = snapshots[descriptive_tuple]
data = df.values
for time in range(10):
X = data[:, time:time+1]
Y = data[:, time+1:time+2]
# Preprocess the abundance data
X = normalize(X, axis=0)
Y = normalize(Y, axis=0)
U, s, V = np.linalg.svd(X, full_matrices=full_svd)
if full_svd is True: # slower
S = np.zeros((len(U), len(s)), dtype=complex)
S[:len(s), :len(s)] = np.diag(s)
pseu_inv_x = np.dot(np.linalg.inv(V),
np.dot(np.linalg.pinv(S), np.linalg.inv(U)))
else: # faster
S = np.diag(s)
pseu_inv_x = np.dot(np.linalg.inv(V),
np.dot(np.linalg.inv(S), np.linalg.pinv(U)))
# Adjacency matrix between clusters
A = np.dot(Y, pseu_inv_x)
# A = np.dot(Y, np.linalg.pinv(X)) # full SVD (slower)
key = descriptive_tuple + ('Week ' + str(time+1),)
linear_mappings[key] = A
nodes_list[key] = list(df.index)
return linear_mappings, nodes_list
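# Hypothetical access pattern for the outputs above (the exact tuple keys
# come from prepare_DMD_matrices, so the key shown here is illustrative):
#   mappings, nodes = find_temporal_adjacency_matrix(0.0, 'family', False)
#   weekly_A = mappings[('High', 1, 'Week 1')]  # one week-to-week transition
#   taxa = nodes[('High', 1, 'Week 1')]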
def aggregate_adjacency_matrix_over_replicates(mappings):
"""
:param mappings: a Python dictionary of pandas DataFrames containing
the adjacency matrices for all 8 replicates,
4 high-O2 and 4 low-O2
:return:
avg_mappings: a dictionary of pandas DataFrames with the mean over the
low and high replicates
std_mappings: a dictionary of pandas DataFrames with the standard
deviation over the low and high replicates
snr_mappings: a dictionary of pandas DataFrames with the signal-to-noise
ratio over the low and high replicates
"""
std_mappings = {}
avg_mappings = {}
snr_mappings = {}
current_nodes = {}
high_rep_mappings = []
low_rep_mappings = []
current_nodes['Low'] = set([])
current_nodes['High'] = set([])
# create two lists, one for each high or low replicates including all
# labels observed in replicates
for key in mappings.keys():
if key[0] == "High":
current_nodes['High'] = \
current_nodes['High'].union(mappings[key].index)
else:
current_nodes['Low'] = \
current_nodes['Low'].union(mappings[key].index)
# add the missing label to each replicate
for key in mappings.keys():
if key[0] == "High":
for id in current_nodes['High']:
if id not in mappings[key].index:
# add one column
mappings[key][id] = [0.0]*len(mappings[key].index)
# add one row
mappings[key].loc[id] = \
[0.0]*len(mappings[key].columns)
# sort the index and columns labels of data frame in order to
# have an identical ordering in the adjacency matrix
mappings[key] = mappings[key].sort_index(axis=1)
mappings[key] = mappings[key].sort_index()
high_rep_mappings.append(mappings[key].values)
else:
for id in current_nodes['Low']:
if id not in mappings[key].index:
# add one column
mappings[key][id] = [0.0]*len(mappings[key].index)
# add one row
mappings[key].loc[id] = \
[0.0]*len(mappings[key].columns)
# sort the index and columns labels of data frame in order to have
# an identical ordering in the adjacency matrix
mappings[key] = mappings[key].sort_index(axis=1)
mappings[key] = mappings[key].sort_index()
low_rep_mappings.append(mappings[key].values)
# find the element by element average of adjacency matrix over replicates
# of high/low O2
avg_mappings['High'] = np.mean(high_rep_mappings, axis=0)
avg_mappings['Low'] = np.mean(low_rep_mappings, axis=0)
# convert from numpy array to pandas dataframe
avg_mappings = DMD_results_dict_from_numpy_to_pandas(avg_mappings,
current_nodes)
# find the element by element STD of adjacency matrix over replicates of
# high/low O2
std_mappings['High'] = np.std(high_rep_mappings, axis=0, ddof=1)
std_mappings['Low'] = np.std(low_rep_mappings, axis=0, ddof=1)
# convert from numpy array to pandas dataframe
std_mappings = DMD_results_dict_from_numpy_to_pandas(std_mappings,
current_nodes)
# find the element by element SNR of adjacency matrix over replicates of
# high/low O2
snr_mappings['High'] = avg_mappings['High']/std_mappings['High']
snr_mappings['Low'] = avg_mappings['Low'] / std_mappings['Low']
# convert from numpy array to pandas dataframe
snr_mappings = DMD_results_dict_from_numpy_to_pandas(snr_mappings,
current_nodes)
return std_mappings, avg_mappings, snr_mappings
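# Hypothetical end-to-end sketch (requires the bacteriopop data files, so it
# is left as comments rather than executable code):
#   mappings, nodes = find_fixed_adjacency_matrix(0.0, 'order', False)
#   frames = DMD_results_dict_from_numpy_to_pandas(mappings, nodes)
#   std_m, avg_m, snr_m = aggregate_adjacency_matrix_over_replicates(frames)
#   snr_m['High']  # signal-to-noise of the averaged high-O2 network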
|
apache-2.0
|
brianlorenz/COSMOS_IMACS_Redshifts
|
PlotCodes/Plot_AvStructMassCut_paper.py
|
1
|
11586
|
#Plots dust attenuation (Av) against axis ratio (b/a), splitting objects by a stellar mass cut and coloring them by a structural property (effective radius)
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys, os, string
import pandas as pd
from astropy.io import fits
import collections
from astropy.cosmology import WMAP9 as cosmo
from astropy.stats import biweight_midvariance
#Folder to save the figures
figout = '/Users/blorenz/COSMOS/Reports/2018/Images/'
#The location with the file for all of our data
fluxdatapath = '/Users/blorenz/COSMOS/COSMOSData/lineflux_red.txt'
#Location of the equivalent width data
ewdata = '/Users/blorenz/COSMOS/COSMOSData/lineew.txt'
#Read in the ew of the lines
ew_df = ascii.read(ewdata).to_pandas()
#The location to store the scale and its stddev of each line
qualdatapath = '/Users/blorenz/COSMOS/COSMOSData/dataqual.txt'
#Read in the scale of the lines
dataqual = ascii.read(qualdatapath).to_pandas()
d = {'True': True, 'False': False}
#File with the error array
errdatapath = '/Users/blorenz/COSMOS/COSMOSData/errs.txt'
#Read in the scale of the lines
err_df = ascii.read(errdatapath,data_start=1,header_start=0,format='csv').to_pandas()
#Read the datafile:
fluxdata = ascii.read(fluxdatapath).to_pandas()
#File with the structural properties
spropdatapath = '/Users/blorenz/COSMOS/COSMOSData/struct_prop.txt'
#Read in the scale of the lines
sprop_df = ascii.read(spropdatapath).to_pandas()
sprop_df = sprop_df.rename(columns={'id':'OBJID'})
fluxdata = pd.merge(fluxdata,sprop_df)
#Read in the sfr file
sfdata = '/Users/blorenz/COSMOS/COSMOSData/sfrs.txt'
sfr_df = ascii.read(sfdata).to_pandas()
fluxdata = pd.merge(fluxdata,sfr_df,on='fluxfile')
#Fontsizes for plotting
axisfont = 24
ticksize = 18
ticks = 8
titlefont = 24
legendfont = 16
textfont = 16
#Division function
def divz(X,Y):
return X/np.where(Y,Y,Y+1)*np.not_equal(Y,0)
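#e.g. divz(np.array([1.,2.]),np.array([2.,0.])) returns array([0.5, 0.]):
#the np.not_equal factor zeroes out entries where Y==0 instead of dividing by zero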
lines=['6563_fix','4861']
#Filter the data
goodlines = [dataqual[line+'_good'].map(d) for line in lines]
#Needs to be good in all lines to be good
allgood = np.logical_and.reduce(goodlines)
#Needs to be bad in any line to be bad
badlines = [dataqual[line+'_bad'].map(d) for line in lines]
baddata = np.logical_or.reduce(badlines)
lowlines = [dataqual[line+'_low'].map(d) for line in lines]
#Needs to be low in any line to be low, and also not bad in a line
somelow = np.logical_and(np.logical_or.reduce(lowlines),np.logical_not(baddata))
combinemass = 1
if not combinemass:
fig,axarr = plt.subplots(2,3,figsize=(24,15),sharex=False,sharey=False)
axarr = np.reshape(axarr,6)
else:
fig,axarr = plt.subplots(2,2,figsize=(16,15),sharex=False,sharey=False)
axarr = np.reshape(axarr,4)
#Gets rid of objects with bad ellipticities
filtar = fluxdata['ar']>0
#Plot the data with error bars
#Counter
c = 0
plotdata = 'ar'
ylabel = 'b/a'
savename = 'AxisRatio'
#fluxdata['n']=np.log10(fluxdata['n'])
#fluxdata['SMD']=np.log10(divz(fluxdata['LMASS'],(4*np.pi*fluxdata['re_kpc']**2)))
ms=12
lwbw=2
notbad = np.logical_not(baddata)
#colormap = np.log10(fluxdata['sSFR'])
colormap = fluxdata['re_kpc']
#cut = 2.7
cut = 3.83
colorcut = 1
colorcut1 = 'blue'
colormed1 = 'darkblue'
colorcut2 = 'red'
colormed2 = 'maroon'
propname = 're'
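#When colorcut is on, the sample is split at re_kpc = 3.83 (the 'cut' above)
#and the two halves are plotted in different colors; propname labels this
#property in the legend and in the saved filename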
for ax in axarr:
color1='dodgerblue'
color3='darkblue'
color2= 'blue'
color4='black'
if c in [0,1,2]:
massfilt = fluxdata['LMASS']<9.5
else:
massfilt = fluxdata['LMASS']>=9.5
if c in [0,2,3,5]:
col = 'good'
filt = notbad
if combinemass: filt = allgood
color='blue'
elif c in [1,4]:
col = 'low'
filt = notbad
if combinemass: filt = (fluxdata.OBJID < 0)
color='orange'
else:
col = 'bad'
filt = baddata
color='red'
#ax.errorbar(fluxdata[filt][filtar]['av'],fluxdata[filt][filtar]['ar'],xerr=fluxdata[filt][filtar]['dav1'],color=color,marker='o',ms=4,lw=0.5,ls='None')
#ax2.errorbar(fluxdata[filt][filtar]['av'],fluxdata[filt][filtar]['re_kpc'],xerr=fluxdata[filt][filtar]['dav1'],color=color,marker='o',ms=4,lw=0.5,ls='None')
#Titles, axes, legends
acount = 0
filttype = (fluxdata[plotdata]>-98.9)
if c==0:
ax.set_ylabel(ylabel+', LMASS < 9.5',fontsize = axisfont)
if c==3:
ax.set_ylabel(ylabel+', LMASS >= 9.5',fontsize = axisfont)
ax.set_xlabel('Av (mag)',fontsize = axisfont)
ax.tick_params(labelsize = ticksize, size=ticks)
filters = np.logical_and(filt,massfilt)
filters = np.logical_and(filters,filttype)
if c in [0,2,3,5]:
loc1 = np.sin(22.5/180*np.pi)
loc2 = np.sin(45.0/180*np.pi)
loc3 = np.sin(67.5/180*np.pi)
mr1 = (fluxdata[filters]['ar']<loc1)
mr2 = np.logical_and(fluxdata[filters]['ar']>=loc1,fluxdata[filters]['ar']<loc2)
mr3 = np.logical_and(fluxdata[filters]['ar']>=loc2,fluxdata[filters]['ar']<loc3)
mr4 = (fluxdata[filters]['ar']>=loc3)
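#mr1-mr4 bin the sample by axis ratio b/a, with bin edges at sin(22.5),
#sin(45), and sin(67.5) degrees; median Av is computed within each bin below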
med1 = np.median(fluxdata[filters][mr1].av)
med2 = np.median(fluxdata[filters][mr2].av)
med3 = np.median(fluxdata[filters][mr3].av)
med4 = np.median(fluxdata[filters][mr4].av)
med751 = np.percentile(fluxdata[filters][mr1].av,75)
med752 = np.percentile(fluxdata[filters][mr2].av,75)
med753 = np.percentile(fluxdata[filters][mr3].av,75)
med754 = np.percentile(fluxdata[filters][mr4].av,75)
emed1 = np.sqrt(biweight_midvariance(fluxdata[filters][mr1].av))/len(fluxdata[filters][mr1])
emed2 = np.sqrt(biweight_midvariance(fluxdata[filters][mr2].av))/len(fluxdata[filters][mr2])
emed3 = np.sqrt(biweight_midvariance(fluxdata[filters][mr3].av))/len(fluxdata[filters][mr3])
emed4 = np.sqrt(biweight_midvariance(fluxdata[filters][mr4].av))/len(fluxdata[filters][mr4])
s1 = np.median(fluxdata[filters][mr1]['ar'])
s2 = np.median(fluxdata[filters][mr2]['ar'])
s3 = np.median(fluxdata[filters][mr3]['ar'])
s4 = np.median(fluxdata[filters][mr4]['ar'])
if c in [0,3]:
ax.errorbar(fluxdata[filters][mr1]['av'],fluxdata[filters][mr1][plotdata],xerr=fluxdata[filters][mr1]['dav1'],color=color1,marker='o',ms=4,lw=0.5,ls='None',label=None)
ax.errorbar(fluxdata[filters][mr2]['av'],fluxdata[filters][mr2][plotdata],xerr=fluxdata[filters][mr2]['dav1'],color=color2,marker='o',ms=4,lw=0.5,ls='None',label=None)
ax.errorbar(fluxdata[filters][mr3]['av'],fluxdata[filters][mr3][plotdata],xerr=fluxdata[filters][mr3]['dav1'],color=color3,marker='o',ms=4,lw=0.5,ls='None',label=None)
ax.errorbar(fluxdata[filters][mr4]['av'],fluxdata[filters][mr4][plotdata],xerr=fluxdata[filters][mr4]['dav1'],color=color4,marker='o',ms=4,lw=0.5,ls='None',label=None)
if colorcut:
#Cut so that we only have SF galaxies
sf = np.log10(fluxdata.sSFR)>-10.5
above = (colormap[filters]>cut)
below = (colormap[filters]<=cut)
above = np.logical_and(above,sf[filters])
below = np.logical_and(below,sf[filters])
ax.errorbar(fluxdata[filters][above]['av'],fluxdata[filters][above][plotdata],xerr=fluxdata[filters][above]['dav1'],color=colorcut1,marker='o',ms=4,lw=0.5,ls='None',label=propname+'>'+str(cut))
ax.errorbar(fluxdata[filters][below]['av'],fluxdata[filters][below][plotdata],xerr=fluxdata[filters][below]['dav1'],color=colorcut2,marker='o',ms=4,lw=0.5,ls='None',label=propname+'<'+str(cut))
medsabove = [np.median(fluxdata[filters][np.logical_and(above,g)].av) for g in [mr1,mr2,mr3,mr4]]
medsbelow = [np.median(fluxdata[filters][np.logical_and(below,g)].av) for g in [mr1,mr2,mr3,mr4]]
#emedsabove = [np.std(fluxdata[filters][np.logical_and(above,g)].av) for g in [mr1,mr2,mr3,mr4]]
emedsabove = 1.49*np.array([np.median(np.abs(fluxdata[filters][np.logical_and(above,g)].av-np.median(fluxdata[filters][np.logical_and(above,g)].av))) for g in [mr1,mr2,mr3,mr4]])
emedsbelow = 1.49*np.array([np.median(np.abs(fluxdata[filters][np.logical_and(below,g)].av-np.median(fluxdata[filters][np.logical_and(below,g)].av))) for g in [mr1,mr2,mr3,mr4]])
#emedsbelow = [np.std(fluxdata[filters][np.logical_and(below,g)].av) for g in [mr1,mr2,mr3,mr4]]
ax.legend(fontsize=axisfont-6,loc=4)
s = 12
ax.errorbar(medsabove,[s1,s2,s3,s4],xerr=emedsabove,label='Median ' + propname + ' > ' + str(cut),ms=s,ls='None',marker='x',zorder=10**10, markerfacecolor='None', markeredgecolor=colormed1,mew=4)
ax.errorbar(medsbelow,[s1,s2,s3,s4],xerr=emedsbelow,label='Median ' + propname + ' < ' + str(cut),ms=s,ls='None',marker='o',zorder=10**10, markerfacecolor='None', markeredgecolor=colormed2,mew=4)
else:
ax.errorbar(med1,s1,xerr=emed1,color='red',marker='o',ms=ms,lw=lwbw,ls='None',label=None)
ax.errorbar(med2,s2,xerr=emed2,color='red',marker='o',ms=ms,lw=lwbw,ls='None',label=None)
ax.errorbar(med3,s3,xerr=emed3,color='red',marker='o',ms=ms,lw=lwbw,ls='None',label=None)
ax.errorbar(med4,s4,xerr=emed4,color='red',marker='o',ms=ms,lw=lwbw,ls='None',label='Median in bin')
ax.text(0.685,0.02,'Median in bin',fontsize = axisfont-2, transform=ax.transAxes,color='red')
#ax.errorbar(med751,loc1/2,xerr=emed1,color='red',marker='o',ms=ms,lw=lwbw,ls='None')
#ax.errorbar(med752,(loc1+loc2)/2,xerr=emed2,color='red',marker='o',ms=ms,lw=lwbw,ls='None')
#ax.errorbar(med753,(loc2+loc3)/2,xerr=emed3,color='red',marker='o',ms=ms,lw=lwbw,ls='None')
#ax.errorbar(med754,(1+loc3)/2,xerr=emed4,color='red',marker='o',ms=ms,lw=lwbw,ls='None')
ax.plot((-100,100),(loc1,loc1),color='black',ls='--',label=None)
ax.plot((-100,100),(loc2,loc2),color='black',ls='--',label=None)
ax.plot((-100,100),(loc3,loc3),color='black',ls='--',label=None)
ydist1 = np.arange(len(fluxdata[filters][mr1]['av']))/float(len(fluxdata[filters][mr1]['av']))
xdist1 = np.sort(fluxdata[filters][mr1]['av'])
ydist2 = np.arange(len(fluxdata[filters][mr2]['av']))/float(len(fluxdata[filters][mr2]['av']))
xdist2 = np.sort(fluxdata[filters][mr2]['av'])
ydist3 = np.arange(len(fluxdata[filters][mr3]['av']))/float(len(fluxdata[filters][mr3]['av']))
xdist3 = np.sort(fluxdata[filters][mr3]['av'])
ydist4 = np.arange(len(fluxdata[filters][mr4]['av']))/float(len(fluxdata[filters][mr4]['av']))
xdist4 = np.sort(fluxdata[filters][mr4]['av'])
if c in [2,5]:
ax.plot(xdist1,ydist1,color=color1)
ax.plot(xdist2,ydist2,color=color2)
ax.plot(xdist3,ydist3,color=color3)
ax.plot(xdist4,ydist4,color=color4)
ax.set_ylabel('Cumulative Distribution',fontsize=axisfont)
ax.set_xlim(-0.1,3)
ax.set_ylim(0,1)
c = c+1
if (combinemass and (c in [1,4])): c = c+1
fig.tight_layout()
if colorcut: fig.savefig(figout + 'Av_'+savename+'_'+propname+'_cut.pdf')
elif combinemass: fig.savefig(figout + 'Av_'+savename+'_combmass.pdf')
else:fig.savefig(figout + 'Av_'+savename+'_mass.pdf')
plt.close(fig)
#Color for BoT > 0.1 or 0.2
#Red for BoT <0.2 and r<2.7...
#Remove bulge galaxies since we only want to look at disks
#think about whether to use stddev or se in the mean (stddev/sqrt(n))
#<Av> vs i, arccos(b/a)
|
mit
|