repo_name | path | copies | size | content | license
---|---|---|---|---|---|
appapantula/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
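# (editorial note: for runs shorter than one day the expression above is equal
# to delta.total_seconds(), which has been available since Python 2.7)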
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non-zero entries and
# Gaussian-distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
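# (editorial note on make_sparse_random_data: the (row, col) indices above are
# drawn independently, so duplicates are possible; scipy's COO format sums
# duplicate entries on conversion, which means the realised number of stored
# non-zeros can be slightly below n_nonzeros)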
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
                  help="Benchmark results are averaged over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
                  help="Ratio of non-zero entries in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
                  help="Density used by the sparse random projection."
                       " ('auto' or float in (0.0, 1.0])")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
                  help="Comma-separated list of transformers to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
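    # (editorial note: when --n-components is 'auto',
    # johnson_lindenstrauss_min_dim gives the smallest number of components
    # that guarantees, with distortion at most eps, approximate preservation
    # of pairwise distances for the given number of samples)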
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            # respect the --dense flag: benchmark the selected input matrix
            time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
thilbern/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 29 | 3349 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on its position.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it is a hierarchical clustering with a structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifold. In contrast, when connectivity constraints are imposed,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10)
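# (editorial aside, not in the original example: kneighbors_graph returns a
# directed, generally asymmetric adjacency matrix; if I recall correctly,
# AgglomerativeClustering symmetrizes the connectivity internally, so it can
# be passed as-is here)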
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
cjayb/mne-python | tutorials/machine-learning/plot_receptive_field.py | 8 | 15220 | # -*- coding: utf-8 -*-
"""
=====================================================================
Spectro-temporal receptive field (STRF) estimation on continuous data
=====================================================================
This demonstrates how an encoding model can be fit with multiple continuous
inputs. In this case, we simulate the model behind a spectro-temporal receptive
field (or STRF). First, we create a linear filter that maps patterns in
spectro-temporal space onto an output, representing neural activity. We fit
a receptive field model that attempts to recover the original linear filter
that was used to create this data.
References
----------
Estimation of spectro-temporal and spatio-temporal receptive fields using
modeling with continuous inputs is described in:
.. [1] Theunissen, F. E. et al. Estimating spatio-temporal receptive
fields of auditory and visual neurons from their responses to
natural stimuli. Network 12, 289-316 (2001).
.. [2] Willmore, B. & Smyth, D. Methods for first-order kernel
estimation: simple-cell receptive fields from responses to
natural scenes. Network 14, 553-77 (2003).
.. [3] Crosse, M. J., Di Liberto, G. M., Bednar, A. & Lalor, E. C. (2016).
The Multivariate Temporal Response Function (mTRF) Toolbox:
A MATLAB Toolbox for Relating Neural Signals to Continuous Stimuli.
Frontiers in Human Neuroscience 10, 604.
doi:10.3389/fnhum.2016.00604
.. [4] Holdgraf, C. R. et al. Rapid tuning shifts in human auditory cortex
enhance speech intelligibility. Nature Communications, 7, 13654 (2016).
doi:10.1038/ncomms13654
"""
# Authors: Chris Holdgraf <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 7
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.decoding import ReceptiveField, TimeDelayingRidge
from scipy.stats import multivariate_normal
from scipy.io import loadmat
from sklearn.preprocessing import scale
rng = np.random.RandomState(1337) # To make this example reproducible
###############################################################################
# Load audio data
# ---------------
#
# We'll read in the audio data from [3]_ in order to simulate a response.
#
# In addition, we'll downsample the data along the time dimension in order to
# speed up computation. Note that, depending on the input values, this may
# not be desirable. For example, downsampling is inappropriate if your input
# stimulus varies more quickly than half of the rate to which we downsample.
# Read in audio that's been recorded in epochs.
path_audio = mne.datasets.mtrf.data_path()
data = loadmat(path_audio + '/speech_data.mat')
audio = data['spectrogram'].T
sfreq = float(data['Fs'][0, 0])
n_decim = 2
audio = mne.filter.resample(audio, down=n_decim, npad='auto')
sfreq /= n_decim
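# (editorial aside, not part of the original tutorial: after decimation the
# new Nyquist frequency is half of the updated sampling rate; stimulus content
# varying faster than this would alias)
print('Resampled to %0.1f Hz (Nyquist %0.1f Hz)' % (sfreq, sfreq / 2.))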
###############################################################################
# Create a receptive field
# ------------------------
#
# We'll simulate a linear receptive field for a theoretical neural signal. This
# defines how the signal will respond to power in this receptive field space.
n_freqs = 20
tmin, tmax = -0.1, 0.4
# To simulate the data we'll create explicit delays here
delays_samp = np.arange(np.round(tmin * sfreq),
np.round(tmax * sfreq) + 1).astype(int)
delays_sec = delays_samp / sfreq
freqs = np.linspace(50, 5000, n_freqs)
grid = np.array(np.meshgrid(delays_sec, freqs))
# We need data to be shaped as n_epochs, n_features, n_times, so swap axes here
grid = grid.swapaxes(0, -1).swapaxes(0, 1)
# Simulate a temporal receptive field with a Gabor filter
means_high = [.1, 500]
means_low = [.2, 2500]
cov = [[.001, 0], [0, 500000]]
gauss_high = multivariate_normal.pdf(grid, means_high, cov)
gauss_low = -1 * multivariate_normal.pdf(grid, means_low, cov)
weights = gauss_high + gauss_low # Combine to create the "true" STRF
kwargs = dict(vmax=np.abs(weights).max(), vmin=-np.abs(weights).max(),
cmap='RdBu_r', shading='gouraud')
fig, ax = plt.subplots()
ax.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax.set(title='Simulated STRF', xlabel='Time Lags (s)', ylabel='Frequency (Hz)')
plt.setp(ax.get_xticklabels(), rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
###############################################################################
# Simulate a neural response
# --------------------------
#
# Using this receptive field, we'll create an artificial neural response to
# a stimulus.
#
# To do this, we'll create a time-delayed version of the receptive field, and
# then calculate the dot product between this and the stimulus. Note that this
# is effectively doing a convolution between the stimulus and the receptive
# field. See `here <https://en.wikipedia.org/wiki/Convolution>`_ for more
# information.
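# Editorial aside (not part of the original tutorial): for a single 1-D
# feature, summing delayed copies of the stimulus weighted by the filter
# coefficients is the same operation as a convolution. Tiny self-check with
# hypothetical names ``toy_stim`` and ``toy_filt``, using a separate RNG so
# the tutorial's random state is left untouched:
toy_rng = np.random.RandomState(0)
toy_stim = toy_rng.randn(20)
toy_filt = np.array([0.5, -0.25, 0.1])  # weights for delays of 0, 1, 2 samples
toy_manual = np.zeros_like(toy_stim)
for toy_delay, toy_w in enumerate(toy_filt):
    toy_delayed = np.zeros_like(toy_stim)
    toy_delayed[toy_delay:] = toy_stim[:len(toy_stim) - toy_delay]
    toy_manual += toy_w * toy_delayed
assert np.allclose(toy_manual, np.convolve(toy_stim, toy_filt)[:len(toy_stim)])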
# Reshape audio to split into epochs, then make epochs the first dimension.
n_epochs, n_seconds = 16, 5
audio = audio[:, :int(n_seconds * sfreq * n_epochs)]
X = audio.reshape([n_freqs, n_epochs, -1]).swapaxes(0, 1)
n_times = X.shape[-1]
# Delay the spectrogram according to delays so it can be combined w/ the STRF
# Lags will now be in axis 1, then we reshape to vectorize
delays = np.arange(np.round(tmin * sfreq),
np.round(tmax * sfreq) + 1).astype(int)
# Iterate through indices and append
X_del = np.zeros((len(delays),) + X.shape)
for ii, ix_delay in enumerate(delays):
# These arrays will take/put particular indices in the data
take = [slice(None)] * X.ndim
put = [slice(None)] * X.ndim
if ix_delay > 0:
take[-1] = slice(None, -ix_delay)
put[-1] = slice(ix_delay, None)
elif ix_delay < 0:
take[-1] = slice(-ix_delay, None)
put[-1] = slice(None, ix_delay)
X_del[ii][tuple(put)] = X[tuple(take)]
# Now set the delayed axis to the 2nd dimension
X_del = np.rollaxis(X_del, 0, 3)
X_del = X_del.reshape([n_epochs, -1, n_times])
n_features = X_del.shape[1]
weights_sim = weights.ravel()
# Simulate a neural response to the sound, given this STRF
y = np.zeros((n_epochs, n_times))
for ii, iep in enumerate(X_del):
# Simulate this epoch and add random noise
noise_amp = .002
y[ii] = np.dot(weights_sim, iep) + noise_amp * rng.randn(n_times)
# Plot the first 2 trials of audio and the simulated electrode activity
X_plt = scale(np.hstack(X[:2]).T).T
y_plt = scale(np.hstack(y[:2]))
time = np.arange(X_plt.shape[-1]) / sfreq
_, (ax1, ax2) = plt.subplots(2, 1, figsize=(6, 6), sharex=True)
ax1.pcolormesh(time, freqs, X_plt, vmin=0, vmax=4, cmap='Reds',
shading='gouraud')
ax1.set_title('Input auditory features')
ax1.set(ylim=[freqs.min(), freqs.max()], ylabel='Frequency (Hz)')
ax2.plot(time, y_plt)
ax2.set(xlim=[time.min(), time.max()], title='Simulated response',
xlabel='Time (s)', ylabel='Activity (a.u.)')
mne.viz.tight_layout()
###############################################################################
# Fit a model to recover this receptive field
# -------------------------------------------
#
# Finally, we'll use the :class:`mne.decoding.ReceptiveField` class to recover
# the linear receptive field of this signal. Note that properties of the
# receptive field (e.g. smoothness) will depend on the autocorrelation in the
# inputs and outputs.
# Create training and testing data
train, test = np.arange(n_epochs - 1), n_epochs - 1
X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
X_train, X_test, y_train, y_test = [np.rollaxis(ii, -1, 0) for ii in
(X_train, X_test, y_train, y_test)]
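# (editorial note: mne's ReceptiveField expects the time axis first, i.e.
# arrays shaped (n_times, n_epochs, n_features) for X and (n_times, n_epochs)
# for y, which is why the last axis is rolled to the front above)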
# Model the simulated data as a function of the spectrogram input
alphas = np.logspace(-3, 3, 7)
scores = np.zeros_like(alphas)
models = []
for ii, alpha in enumerate(alphas):
rf = ReceptiveField(tmin, tmax, sfreq, freqs, estimator=alpha)
rf.fit(X_train, y_train)
# Now make predictions about the model output, given input stimuli.
scores[ii] = rf.score(X_test, y_test)
models.append(rf)
times = rf.delays_ / float(rf.sfreq)
# Choose the model that performed best on the held out data
ix_best_alpha = np.argmax(scores)
best_mod = models[ix_best_alpha]
coefs = best_mod.coef_[0]
best_pred = best_mod.predict(X_test)[:, 0]
# Plot the original STRF, and the one that we recovered with modeling.
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(6, 3), sharey=True, sharex=True)
ax1.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax2.pcolormesh(times, rf.feature_names, coefs, **kwargs)
ax1.set_title('Original STRF')
ax2.set_title('Best Reconstructed STRF')
plt.setp([iax.get_xticklabels() for iax in [ax1, ax2]], rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
# Plot the actual response and the predicted response on a held out stimulus
time_pred = np.arange(best_pred.shape[0]) / sfreq
fig, ax = plt.subplots()
ax.plot(time_pred, y_test, color='k', alpha=.2, lw=4)
ax.plot(time_pred, best_pred, color='r', lw=1)
ax.set(title='Original and predicted activity', xlabel='Time (s)')
ax.legend(['Original', 'Predicted'])
plt.autoscale(tight=True)
mne.viz.tight_layout()
###############################################################################
# Visualize the effects of regularization
# ---------------------------------------
#
# Above we fit a :class:`mne.decoding.ReceptiveField` model for one of many
# values for the ridge regularization parameter. Here we will plot the model
# score as well as the model coefficients for each value, in order to
# visualize how coefficients change with different levels of regularization.
# These issues as well as the STRF pipeline are described in detail
# in [1]_, [2]_, and [4]_.
# Plot model score for each ridge parameter
fig = plt.figure(figsize=(10, 4))
ax = plt.subplot2grid([2, len(alphas)], [1, 0], 1, len(alphas))
ax.plot(np.arange(len(alphas)), scores, marker='o', color='r')
ax.annotate('Best parameter', (ix_best_alpha, scores[ix_best_alpha]),
(ix_best_alpha, scores[ix_best_alpha] - .1),
arrowprops={'arrowstyle': '->'})
plt.xticks(np.arange(len(alphas)), ["%.0e" % ii for ii in alphas])
ax.set(xlabel="Ridge regularization value", ylabel="Score ($R^2$)",
xlim=[-.4, len(alphas) - .6])
mne.viz.tight_layout()
# Plot the STRF of each ridge parameter
for ii, (rf, i_alpha) in enumerate(zip(models, alphas)):
ax = plt.subplot2grid([2, len(alphas)], [0, ii], 1, 1)
ax.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
plt.xticks([], [])
plt.yticks([], [])
plt.autoscale(tight=True)
fig.suptitle('Model coefficients / scores for many ridge parameters', y=1)
mne.viz.tight_layout()
###############################################################################
# Using different regularization types
# ------------------------------------
# In addition to the standard ridge regularization, the
# :class:`mne.decoding.TimeDelayingRidge` class also exposes
# a `Laplacian <https://en.wikipedia.org/wiki/Laplacian_matrix>`_ regularization
# term of the form:
#
# .. math::
# \left[\begin{matrix}
# 1 & -1 & & & & \\
# -1 & 2 & -1 & & & \\
# & -1 & 2 & -1 & & \\
# & & \ddots & \ddots & \ddots & \\
# & & & -1 & 2 & -1 \\
# & & & & -1 & 1\end{matrix}\right]
#
# This imposes a smoothness constraint on nearby time samples and/or features.
# Quoting [3]_:
#
# Tikhonov [identity] regularization (Equation 5) reduces overfitting by
# smoothing the TRF estimate in a way that is insensitive to
# the amplitude of the signal of interest. However, the Laplacian
# approach (Equation 6) reduces off-sample error whilst preserving
# signal amplitude (Lalor et al., 2006). As a result, this approach
# usually leads to an improved estimate of the system’s response (as
# indexed by MSE) compared to Tikhonov regularization.
#
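# Editorial aside (not part of the original tutorial): the matrix shown above
# can be constructed directly, e.g. for a hypothetical number of lags
# ``n_lags_demo``:
n_lags_demo = 5
laplacian_demo = (2 * np.eye(n_lags_demo)
                  - np.eye(n_lags_demo, k=1)
                  - np.eye(n_lags_demo, k=-1))
laplacian_demo[0, 0] = laplacian_demo[-1, -1] = 1  # boundary entries are 1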
scores_lap = np.zeros_like(alphas)
models_lap = []
for ii, alpha in enumerate(alphas):
estimator = TimeDelayingRidge(tmin, tmax, sfreq, reg_type='laplacian',
alpha=alpha)
rf = ReceptiveField(tmin, tmax, sfreq, freqs, estimator=estimator)
rf.fit(X_train, y_train)
# Now make predictions about the model output, given input stimuli.
scores_lap[ii] = rf.score(X_test, y_test)
models_lap.append(rf)
ix_best_alpha_lap = np.argmax(scores_lap)
###############################################################################
# Compare model performance
# -------------------------
# Below we visualize the model performance of each regularization method
# (ridge vs. Laplacian) for different levels of alpha. As you can see, the
# Laplacian method performs better in general, because it imposes a smoothness
# constraint along the time and feature dimensions of the coefficients.
# This matches the "true" receptive field structure and results in a better
# model fit.
fig = plt.figure(figsize=(10, 6))
ax = plt.subplot2grid([3, len(alphas)], [2, 0], 1, len(alphas))
ax.plot(np.arange(len(alphas)), scores_lap, marker='o', color='r')
ax.plot(np.arange(len(alphas)), scores, marker='o', color='0.5', ls=':')
ax.annotate('Best Laplacian', (ix_best_alpha_lap,
scores_lap[ix_best_alpha_lap]),
(ix_best_alpha_lap, scores_lap[ix_best_alpha_lap] - .1),
arrowprops={'arrowstyle': '->'})
ax.annotate('Best Ridge', (ix_best_alpha, scores[ix_best_alpha]),
(ix_best_alpha, scores[ix_best_alpha] - .1),
arrowprops={'arrowstyle': '->'})
plt.xticks(np.arange(len(alphas)), ["%.0e" % ii for ii in alphas])
ax.set(xlabel="Laplacian regularization value", ylabel="Score ($R^2$)",
xlim=[-.4, len(alphas) - .6])
mne.viz.tight_layout()
# Plot the STRF of each ridge parameter
xlim = times[[0, -1]]
for ii, (rf_lap, rf, i_alpha) in enumerate(zip(models_lap, models, alphas)):
ax = plt.subplot2grid([3, len(alphas)], [0, ii], 1, 1)
ax.pcolormesh(times, rf_lap.feature_names, rf_lap.coef_[0], **kwargs)
ax.set(xticks=[], yticks=[], xlim=xlim)
if ii == 0:
ax.set(ylabel='Laplacian')
ax = plt.subplot2grid([3, len(alphas)], [1, ii], 1, 1)
ax.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
ax.set(xticks=[], yticks=[], xlim=xlim)
if ii == 0:
ax.set(ylabel='Ridge')
fig.suptitle('Model coefficients / scores for laplacian regularization', y=1)
mne.viz.tight_layout()
###############################################################################
# Plot the original STRF, and the one that we recovered with modeling.
rf = models[ix_best_alpha]
rf_lap = models_lap[ix_best_alpha_lap]
_, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(9, 3),
sharey=True, sharex=True)
ax1.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax2.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
ax3.pcolormesh(times, rf_lap.feature_names, rf_lap.coef_[0], **kwargs)
ax1.set_title('Original STRF')
ax2.set_title('Best Ridge STRF')
ax3.set_title('Best Laplacian STRF')
plt.setp([iax.get_xticklabels() for iax in [ax1, ax2, ax3]], rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
| bsd-3-clause |
COMP90024CloudComputing/Submit_Cloud_Computing | search/rest_bris.py | 1 | 5189 | import tweepy
import json
import couchdb
import jsonpickle
import sys,os
import time
from textblob import TextBlob
import requests
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
import matplotlib.path as mplPath
import numpy as np
import socket
maxTweets = 10000000 # Some arbitrary large number
tweetsPerQry = 100 # this is the max the API permits
tweetCount = 0
#create socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', 9999))
print "Connect to socket"
print s.recv(1024)
oauth_keys = [["eNaOt7MW9SUk7zuPQpCrbXTBC", "agxEVyN5z6HtIv9LAK6CNmSU3dH194BMkuALb5oI9PU4Ui5dzK", "855999502255247360-Re3ewycQBkVi08w0rb1sp9bB40cLouA", "nisiRGg3tO50EMmbaU6MvAhnXYXe3FcQ0sjzkolDEPTyV"],
["ahAzM3Wvh4YITVM60G65ZuOpP", "NbWxlbuBsoDA4HELYWlyylY0RASjm0Gtbmsn9Vzbx10ZFXDAGv", "855999502255247360-BjSm0tFrw3v3um0QTRJ0wAvmhiy7cme", "kRUY3kM5MbBNryRDMA20EN7CNE7lSdJEg3FO3z7Omm1BH"],
["YFwd6NZlPBEm2Nu7VUs7eOXva", "ACa2A3C4RrV2TaSa9v1KF3ruO0zSZBC91RPYDh6K1XzYDY8rry", "855999502255247360-p93VgBZJIdb9254jAiCWzxCJ6RFJsLE", "Yrp1QszXQ2NUXJExQC4NR42ew4t7FpHKVD6EpJK8PBFZL"],
["ZQGDhawy20pPmyQmKQ79CtVNu", "Y1goSHJTe70CalKKBhT7EnGgkmiffnevmEvwG34z7IRjVVbBfb", "855999502255247360-wcivhMiEXhZHw5zyDJe0QhulwLaPFUW", "pvWGE9jNQyeHLnnMFkTbXniorXIjlDKuTDBxEPQRkLfXH"],
["ibSGx30BljiLofBRtS77AFETt", "4g2H14S8ugsPme1jELA6Y4O9RR5Sf8EElPLH96F5A8XFarl8VM", "855999502255247360-mwwO3oTq9TKhixLA8Exke4vgV1CnyfD", "med3gQXEWkKQtyC84zSDPIgCH3o2WrHGIx7fliTk6sJEf"]]
auths = []
for consumer_key, consumer_secret, access_key, access_secret in oauth_keys:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
auths.append(auth)
#Pass our consumer key and consumer secret to Tweepy's user authentication handler
#auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
#Pass our access token and access secret to Tweepy's user authentication handler
#auth.set_access_token(access_token, access_secret)
#Creating a twitter API wrapper using tweepy
# If results from a specific ID onwards are required, set since_id to that ID,
# else default to no lower limit and go as far back as the API allows.
sinceId = None
# If only results below a specific ID are required, set max_id to that ID,
# else default to no upper limit and start from the most recent tweet matching the search query.
max_id = -1L
switch = 0
while tweetCount < maxTweets:
api = tweepy.API(auths[switch], #monitor_rate_limit=True,
#retry_count=10000, retry_delay=5,
# retry_errors=set([401, 404, 500, 503]),
#wait_on_rate_limit=True,
wait_on_rate_limit_notify=True)
#Error handling
if (not api):
print ("Problem connecting to API")
try:
if max_id <= 0:
if (not sinceId):
new_tweets = api.search(q="place:004ec16c62325149", count=tweetsPerQry)
else:
new_tweets = api.search(q="place:004ec16c62325149", count=tweetsPerQry,
since_id=sinceId)
else:
if (not sinceId):
new_tweets = api.search(q="place:004ec16c62325149", count=tweetsPerQry,
max_id=str(max_id - 1))
else:
new_tweets = api.search(q="place:004ec16c62325149", count=tweetsPerQry,
max_id=str(max_id - 1),
since_id=sinceId)
if not new_tweets:
print "No more tweets found"
time.sleep(180)
for tweet in new_tweets:
#Load tweets and send to analysis server
data = json.loads(jsonpickle.encode(tweet._json))
_id = data['id']
text = str(data['text'].encode('ascii','ignore'))
lang = str(data['lang'].encode('ascii'))
created_at = str(data['created_at'].encode('ascii'))
coordinates = "null"
if data['coordinates'] != None:
coordinates = data['coordinates']['coordinates']
else:
print "No coordinate"
place = str(data['place']['full_name'].encode('ascii'))
is_finance = 'false'
send_data = {'id':_id, 'text':text, 'lang':lang, 'created_at':created_at, 'coordinates':coordinates, 'place':place, 'is_finance':is_finance}
send_date_string = json.dumps(send_data)
print "send data"
s.send(send_date_string)
print " Send data success"
tweetCount += len(new_tweets)
try:
max_id = new_tweets[-1].id
except :
continue
print("Downloaded {0} tweets in Brisbane".format(tweetCount))
except tweepy.TweepError as e:
print "switching keys...bris"
switch += 1
if switch > 4:
print "Limit reached"
switch = 0
time.sleep(180)
continue
except StopIteration:
break
| apache-2.0 |
WafaaT/spark-tk | regression-tests/sparktkregtests/testcases/dicom/dicom_filter_test.py | 13 | 10428 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests dicom.filter functionality"""
import unittest
from sparktkregtests.lib import sparktk_test
import os
import dicom
import numpy
import random
from lxml import etree
import datetime
class DicomFilterTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""import dicom data for testing"""
super(DicomFilterTest, self).setUp()
self.dataset = self.get_file("dicom_uncompressed")
self.dicom = self.context.dicom.import_dcm(self.dataset)
self.xml_directory = "../../../datasets/dicom/dicom_uncompressed/xml/"
self.image_directory = "../../../datasets/dicom/dicom_uncompressed/imagedata/"
self.query = ".//DicomAttribute[@keyword='KEYWORD']/Value/text()"
self.count = self.dicom.metadata.count()
def test_filter_one_key(self):
"""test filter with basic filter function"""
# extract a key-value pair from the first row metadata for our use
first_row = self.dicom.metadata.to_pandas()["metadata"][0]
xml = etree.fromstring(first_row.encode("ascii", "ignore"))
patient_id = xml.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
# ask dicom to filter using our key-value filter function
self.dicom.filter(self._filter_key_values({ "PatientID" : patient_id }))
# we generate our own result to compare to dicom's
expected_result = self._filter({ "PatientID" : patient_id })
# ensure results match
self._compare_dicom_with_expected_result(expected_result)
def test_filter_multi_key(self):
"""test filter with basic filter function mult keyval pairs"""
# first we extract key-value pairs from the first row's metadata
# for our own use to generate a key-val dictionary
first_row = self.dicom.metadata.to_pandas()["metadata"][0]
xml = etree.fromstring(first_row.encode("ascii", "ignore"))
patient_id = xml.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
sopi_id = xml.xpath(self.query.replace("KEYWORD", "SOPInstanceUID"))[0]
key_val = { "PatientID" : patient_id, "SOPInstanceUID" : sopi_id }
# we use our filter function and ask dicom to filter
self.dicom.filter(self._filter_key_values(key_val))
# here we generate our own result
expected_result = self._filter(key_val)
# compare expected result to what dicom gave us
self._compare_dicom_with_expected_result(expected_result)
def test_filter_zero_matching_records(self):
"""test filter with filter function returns none"""
# we give dicom a filter function which filters by
# key-value and give it a key-value pair which will
# return 0 records
pandas = self.dicom.metadata.to_pandas()
self.dicom.filter(self._filter_key_values({ "PatientID" : -6 }))
self.assertEqual(0, self.dicom.metadata.count())
def test_filter_nothing(self):
"""test filter with filter function filters nothing"""
# this filter function will return all records
self.dicom.filter(self._filter_nothing())
self.assertEqual(self.dicom.metadata.count(), self.count)
def test_filter_everything(self):
"""test filter function filter everything"""
# filter_everything filter out all of the records
self.dicom.filter(self._filter_everything())
self.assertEqual(0, self.dicom.metadata.count())
def test_filter_timestamp_range(self):
"""test filter with timestamp range function"""
# we will test filter with a function which takes a begin and end
# date and returns all records with a study date between them
# we will set begin date to 15 years ago and end date to 5 years ago
begin_date = datetime.datetime.now() - datetime.timedelta(days=15*365)
end_date = datetime.datetime.now() - datetime.timedelta(days=5*365)
# here we will generate our own result by filtering for records
# which meet our criteria
expected_result = []
pandas = self.dicom.metadata.to_pandas()
# iterate through the rows and append all records with
# a study date between our begin and end date
for index, row in pandas.iterrows():
ascii_row = row["metadata"].encode("ascii", "ignore")
xml_root = etree.fromstring(ascii_row)
study_date = xml_root.xpath(self.query.replace("KEYWORD", "StudyDate"))[0]
datetime_study_date = datetime.datetime.strptime(study_date, "%Y%m%d")
if datetime_study_date > begin_date and datetime_study_date < end_date:
expected_result.append(ascii_row)
# now we ask dicom to use our filter function below to return
# all records with a StudyDate within our specified range
self.dicom.filter(self._filter_timestamp_range(begin_date, end_date))
# ensure that expected result matches actual
self._compare_dicom_with_expected_result(expected_result)
def test_return_type_str(self):
"""test filter with function that returns strings"""
self.dicom.filter(self._filter_return_string())
self.assertEqual(3, self.dicom.metadata.count())
def test_return_type_int(self):
"""test filter wtih function that returns ints"""
self.dicom.filter(self._filter_return_int())
self.assertEqual(3, self.dicom.metadata.count())
def test_filter_has_bugs(self):
"""test filter with a broken filter function"""
with self.assertRaisesRegexp(Exception, "this filter is broken!"):
self.dicom.filter(self._filter_has_bugs())
self.dicom.metadata.count()
def test_filter_invalid_param(self):
"""test filter with an invalid param type"""
# should fail because filter takes a function not a keyvalue pair
with self.assertRaisesRegexp(Exception, "'dict' object is not callable"):
self.dicom.filter({ "PatientID" : "bla" })
self.dicom.metadata.count()
def test_filter_invalid_function(self):
"""test filter with function which takes more than one param"""
with self.assertRaisesRegexp(Exception, "takes exactly 2 arguments"):
self.dicom.filter(self._filter_invalid())
self.dicom.metadata.count()
def _filter_key_values(self, key_val):
"""filter by key-value"""
def _filter_key_value(row):
metadata = row["metadata"].encode("ascii", "ignore")
xml_root = etree.fromstring(metadata)
for key in key_val:
xml_element_value = xml_root.xpath(".//DicomAttribute[@keyword='" + key + "']/Value/text()")[0]
                if xml_element_value != key_val[key]:
                    return False
            # a row passes only if every requested key-value pair matched
            return True
return _filter_key_value
def _filter_nothing(self):
"""returns all records"""
def _filter_nothing(row):
return True
return _filter_nothing
def _filter_everything(self):
"""returns no records"""
def _filter_everything(row):
return False
return _filter_everything
def _filter_timestamp_range(self, begin_date, end_date):
"""return records within studydate date range"""
def _filter_timestamp_range(row):
metadata = row["metadata"].encode("ascii", "ignore")
xml_root = etree.fromstring(metadata)
timestamp = xml_root.xpath(".//DicomAttribute[@keyword='StudyDate']/Value/text()")[0]
timestamp = datetime.datetime.strptime(timestamp, "%Y%m%d")
if begin_date < timestamp and timestamp < end_date:
return True
else:
return False
return _filter_timestamp_range
def _filter_return_string(self):
"""filter function which returns str"""
def _filter_return_string(row):
return "True"
return _filter_return_string
def _filter_return_int(self):
"""filter function returns int"""
def _filter_return_int(row):
return -1
return _filter_return_int
def _filter_has_bugs(self):
"""broken filter function"""
def _filter_has_bugs(row):
raise Exception("this filter is broken!")
return _filter_has_bugs
def _filter_invalid(self):
"""filter function takes 2 params"""
# filter is invalid because it takes
# 2 parameters
def _filter_invalid(index, row):
return True
return _filter_invalid
def _filter(self, keywords):
"""filter records by key value pair"""
# here we are generating the expected result
matching_records = []
pandas_metadata = self.dicom.metadata.to_pandas()["metadata"]
        for row in pandas_metadata:
            ascii_xml = row.encode("ascii", "ignore")
            xml = etree.fromstring(ascii_xml)
            # a record is expected only if every requested keyword carries the
            # requested value
            matches_all = True
            for keyword in keywords:
                this_row_keyword_value = xml.xpath(self.query.replace("KEYWORD", keyword))
                if not this_row_keyword_value or this_row_keyword_value[0] != keywords[keyword]:
                    matches_all = False
            if matches_all:
                matching_records.append(ascii_xml)
return matching_records
def _compare_dicom_with_expected_result(self, expected_result):
"""compare expected result with actual result"""
pandas_result = self.dicom.metadata.to_pandas()["metadata"]
for expected, actual in zip(expected_result, pandas_result):
actual_ascii = actual.encode("ascii", "ignore")
self.assertEqual(actual_ascii, expected)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
ocefpaf/iris | docs/iris/src/sphinxext/gen_gallery.py | 2 | 6174 | #
# (C) Copyright 2012 MATPLOTLIB (vn 1.2.0)
#
'''
Generate a thumbnail gallery of examples.
'''
import os
import glob
import re
import warnings
import matplotlib.image as image
from sphinx.util import status_iterator
template = '''\
{{% extends "layout.html" %}}
{{% set title = "Thumbnail gallery" %}}
{{% block body %}}
<h3>Click on any image to see full size image and source code</h3>
<br/>
<ul>
<li><a class="reference internal" href="#">Gallery</a>
<ul>
{}
</ul>
</li>
</ul>
{}
{{% endblock %}}
'''
multiimage = re.compile(r'(.*?)(_\d\d){1,2}')
def make_thumbnail(args):
image.thumbnail(args[0], args[1], 0.4)
def out_of_date(original, derived):
return (not os.path.exists(derived) or
os.stat(derived).st_mtime < os.stat(original).st_mtime)
def gen_gallery(app, doctree):
if app.builder.name != 'html':
return
outdir = app.builder.outdir
rootdir = 'examples'
# Images we want to skip for the gallery because they are an unusual
# size that doesn't layout well in a table, or because they may be
# redundant with other images or uninteresting.
skips = set([
'mathtext_examples',
'matshow_02',
'matshow_03',
'matplotlib_icon'])
thumbnails = {}
rows = []
random_image = []
toc_rows = []
link_template = ('<a href="{href}">'
'<img src="{thumb_file}" border="0"'
' alt="{alternative_text}"/>'
'</a>')
header_template = ('<div class="section" id="{}">'
'<h4>{}'
'<a class="headerlink" href="#{}"'
' title="Permalink to this headline">¶</a>'
'</h4>')
toc_template = ('<li>'
'<a class="reference internal" href="#{}">{}</a>'
'</li>')
random_image_content_template = '''
// This file was automatically generated by gen_gallery.py & should not be
// modified directly.
images = new Array();
{}
'''
random_image_template = "['{thumbfile}', '{full_image}', '{link}'];"
random_image_join = 'images[{}] = {}'
dirs = ('General', 'Meteorology', 'Oceanography')
for subdir in dirs:
rows.append(header_template.format(subdir, subdir, subdir))
toc_rows.append(toc_template.format(subdir, subdir))
origdir = os.path.join(os.path.dirname(outdir), rootdir, subdir)
if not os.path.exists(origdir):
origdir = os.path.join(os.path.dirname(outdir), 'plot_directive',
rootdir, subdir)
thumbdir = os.path.join(outdir, rootdir, subdir, 'thumbnails')
if not os.path.exists(thumbdir):
os.makedirs(thumbdir)
data = []
for filename in sorted(glob.glob(os.path.join(origdir, '*.png'))):
if filename.endswith('hires.png'):
continue
path, filename = os.path.split(filename)
basename, ext = os.path.splitext(filename)
if basename in skips:
continue
# Create thumbnails based on images in tmpdir, and place them
# within the build tree.
orig_path = str(os.path.join(origdir, filename))
thumb_path = str(os.path.join(thumbdir, filename))
if out_of_date(orig_path, thumb_path) or True:
thumbnails[orig_path] = thumb_path
m = multiimage.match(basename)
if m is not None:
basename = m.group(1)
data.append((subdir, basename,
os.path.join(rootdir, subdir, 'thumbnails',
filename)))
for (subdir, basename, thumbfile) in data:
if thumbfile is not None:
anchor = os.path.basename(thumbfile)
anchor = os.path.splitext(anchor)[0].replace('_', '-')
link = 'examples/{}/{}.html#{}'.format(
subdir,
basename,
anchor)
rows.append(link_template.format(
href=link,
thumb_file=thumbfile,
alternative_text=basename))
random_image.append(random_image_template.format(
link=link,
thumbfile=thumbfile,
basename=basename,
full_image='_images/' + os.path.basename(thumbfile)))
if len(data) == 0:
warnings.warn('No thumbnails were found in {}'.format(subdir))
# Close out the <div> opened up at the top of this loop.
rows.append('</div>')
# Generate JS list of images for front page.
random_image_content = '\n'.join([random_image_join.format(i, line)
for i, line in enumerate(random_image)])
random_image_content = random_image_content_template.format(
random_image_content)
random_image_script_path = os.path.join(app.builder.srcdir,
'_static',
'random_image.js')
with open(random_image_script_path, 'w') as fh:
fh.write(random_image_content)
content = template.format('\n'.join(toc_rows),
'\n'.join(rows))
# Only write out the file if the contents have actually changed.
# Otherwise, this triggers a full rebuild of the docs.
gallery_path = os.path.join(app.builder.srcdir,
'_templates',
'gallery.html')
if os.path.exists(gallery_path):
with open(gallery_path, 'r') as fh:
regenerate = fh.read() != content
else:
regenerate = True
if regenerate:
with open(gallery_path, 'w') as fh:
fh.write(content)
for key in status_iterator(thumbnails, 'generating thumbnails... ',
length=len(thumbnails)):
image.thumbnail(key, thumbnails[key], 0.3)
def setup(app):
app.connect('env-updated', gen_gallery)
| lgpl-3.0 |
jmetzen/scikit-learn | examples/model_selection/plot_validation_curve.py | 141 | 1931 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
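# (editorial aside, not part of the original example: the gamma value with the
# highest mean cross-validation score can be read off directly)
best_idx = np.argmax(test_scores_mean)
print("Best gamma: %.1e (mean CV accuracy %.3f)"
      % (param_range[best_idx], test_scores_mean[best_idx]))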
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, train_scores_mean, label="Training score",
color="darkorange", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
anamariad/ML | Clusterization/clusterization/utils.py | 1 | 1884 | __author__ = 'Annouk'
import os
import numpy as np
import matplotlib.pyplot as plt
def read_points(csv_file_name):
points = np.loadtxt(csv_file_name, delimiter = ',')
return points
def plot_clusters(centroids, clusters):
# we assume centroids is a list of points and clusters is a dictionary of arrays
    # we transpose them so we can get two separate arrays for the X and Y coordinates
clusters_no = len(centroids)
centroids_transposed = np.array(centroids).T
x_centroids = centroids_transposed[0]
y_centroids = centroids_transposed[1]
for i in range(clusters_no):
cluster = np.array(clusters[i]).transpose()
x_cluster = cluster[0]
y_cluster = cluster[1]
# setting a different color for each cluster
plt.scatter(x_cluster, y_cluster, c = np.random.rand(1, 3))
plt.scatter(x_centroids, y_centroids, c = 'r')
plt.show()
def plot_clusters_1d(centroids, clusters):
# adapting the above function to work with one dimensional data
clusters_no = len(clusters)
x_centroids = np.array(centroids)
y_centroids = np.zeros(clusters_no)
for i in range(clusters_no):
x_cluster = np.array(clusters[i])
y_cluster = np.zeros(len(clusters[i]))
# setting a different color for each cluster
plt.scatter(x_cluster, y_cluster, c = np.random.rand(1, 3))
plt.scatter(x_centroids, y_centroids, c = 'r')
plt.show()
def convergence(centroids, oldcentroids, threshold):
return np.linalg.norm(centroids - oldcentroids) < threshold
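# (editorial aside: a minimal usage sketch with hypothetical variable names;
# inside a k-means style loop one would typically write
#   if convergence(new_centroids, old_centroids, threshold=1e-6):
#       break
# to stop iterating once the centroids stop moving)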
def print_matrix_to_file(matrix, output_file):
for i in range(len(matrix)):
output_file.write(','.join(str(nr) for nr in matrix[i]))
output_file.write('\n')
def print_array_to_file(array, output_file):
output_file.write(','.join(str(nr) for nr in array))
output_file.write('\n') | apache-2.0 |
plowman/python-mcparseface | models/autoencoder/MaskingNoiseAutoencoderRunner.py | 10 | 1689 | import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder.autoencoder_models.DenoisingAutoencoder import MaskingNoiseAutoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def standard_scale(X_train, X_test):
preprocessor = prep.StandardScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
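# (editorial note on get_random_block_from_data: it samples a random
# *contiguous* block of rows rather than a shuffled minibatch, which is a
# cheaper but slightly more correlated way to draw training batches)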
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 100
batch_size = 128
display_step = 1
autoencoder = MaskingNoiseAutoencoder(n_input = 784,
n_hidden = 200,
transfer_function = tf.nn.softplus,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001),
dropout_probability = 0.95)
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
cost = autoencoder.partial_fit(batch_xs)
avg_cost += cost / n_samples * batch_size
if epoch % display_step == 0:
print "Epoch:", '%04d' % (epoch + 1), \
"cost=", "{:.9f}".format(avg_cost)
print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
| apache-2.0 |
mne-tools/mne-python | mne/io/array/tests/test_array.py | 4 | 6460 | # Author: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_allclose,
assert_equal)
import pytest
import matplotlib.pyplot as plt
from mne import find_events, Epochs, pick_types
from mne.io import read_raw_fif
from mne.io.array import RawArray
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.meas_info import create_info
from mne.io.pick import get_channel_type_constants
from mne.channels import make_dig_montage
base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
fif_fname = op.join(base_dir, 'test_raw.fif')
def test_long_names():
"""Test long name support."""
info = create_info(['a' * 15 + 'b', 'a' * 16], 1000., verbose='error')
data = np.empty((2, 1000))
raw = RawArray(data, info)
assert raw.ch_names == ['a' * 15 + 'b', 'a' * 16]
# and a way to get the old behavior
raw.rename_channels({k: k[:13] for k in raw.ch_names},
allow_duplicates=True, verbose='error')
assert raw.ch_names == ['a' * 13 + '-0', 'a' * 13 + '-1']
info = create_info(['a' * 16] * 11, 1000., verbose='error')
data = np.empty((11, 1000))
raw = RawArray(data, info)
assert raw.ch_names == ['a' * 16 + '-%s' % ii for ii in range(11)]
def test_array_copy():
"""Test copying during construction."""
info = create_info(1, 1000.)
data = np.empty((1, 1000))
# 'auto' (default)
raw = RawArray(data, info)
assert raw._data is data
assert raw.info is not info
raw = RawArray(data.astype(np.float32), info)
assert raw._data is not data
assert raw.info is not info
# 'info' (more restrictive)
raw = RawArray(data, info, copy='info')
assert raw._data is data
assert raw.info is not info
with pytest.raises(ValueError, match="data copying was not .* copy='info"):
RawArray(data.astype(np.float32), info, copy='info')
# 'data'
raw = RawArray(data, info, copy='data')
assert raw._data is not data
assert raw.info is info
# 'both'
raw = RawArray(data, info, copy='both')
assert raw._data is not data
assert raw.info is not info
raw = RawArray(data.astype(np.float32), info, copy='both')
assert raw._data is not data
assert raw.info is not info
# None
raw = RawArray(data, info, copy=None)
assert raw._data is data
assert raw.info is info
with pytest.raises(ValueError, match='data copying was not .* copy=None'):
RawArray(data.astype(np.float32), info, copy=None)
@pytest.mark.slowtest
def test_array_raw():
"""Test creating raw from array."""
# creating
raw = read_raw_fif(fif_fname).crop(2, 5)
data, times = raw[:, :]
sfreq = raw.info['sfreq']
ch_names = [(ch[4:] if 'STI' not in ch else ch)
for ch in raw.info['ch_names']] # change them, why not
types = list()
for ci in range(101):
types.extend(('grad', 'grad', 'mag'))
types.extend(['ecog', 'seeg', 'hbo']) # really 4 meg channels
types.extend(['stim'] * 9)
types.extend(['dbs']) # really eeg channel
types.extend(['eeg'] * 60)
picks = np.concatenate([pick_types(raw.info, meg=True)[::20],
pick_types(raw.info, meg=False, stim=True),
pick_types(raw.info, meg=False, eeg=True)[::20]])
del raw
data = data[picks]
ch_names = np.array(ch_names)[picks].tolist()
types = np.array(types)[picks].tolist()
types.pop(-1)
# wrong length
pytest.raises(ValueError, create_info, ch_names, sfreq, types)
# bad entry
types.append('foo')
pytest.raises(KeyError, create_info, ch_names, sfreq, types)
types[-1] = 'eog'
# default type
info = create_info(ch_names, sfreq)
assert_equal(info['chs'][0]['kind'],
get_channel_type_constants()['misc']['kind'])
# use real types
info = create_info(ch_names, sfreq, types)
raw2 = _test_raw_reader(RawArray, test_preloading=False,
data=data, info=info, first_samp=2 * data.shape[1])
data2, times2 = raw2[:, :]
assert_allclose(data, data2)
assert_allclose(times, times2)
assert ('RawArray' in repr(raw2))
pytest.raises(TypeError, RawArray, info, data)
# filtering
picks = pick_types(raw2.info, meg=True, misc=True, exclude='bads')[:4]
assert_equal(len(picks), 4)
raw_lp = raw2.copy()
kwargs = dict(fir_design='firwin', picks=picks)
raw_lp.filter(None, 4.0, h_trans_bandwidth=4., **kwargs)
raw_hp = raw2.copy()
raw_hp.filter(16.0, None, l_trans_bandwidth=4., **kwargs)
raw_bp = raw2.copy()
raw_bp.filter(8.0, 12.0, l_trans_bandwidth=4., h_trans_bandwidth=4.,
**kwargs)
raw_bs = raw2.copy()
raw_bs.filter(16.0, 4.0, l_trans_bandwidth=4., h_trans_bandwidth=4.,
**kwargs)
data, _ = raw2[picks, :]
lp_data, _ = raw_lp[picks, :]
hp_data, _ = raw_hp[picks, :]
bp_data, _ = raw_bp[picks, :]
bs_data, _ = raw_bs[picks, :]
sig_dec = 15
assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
# plotting
raw2.plot()
raw2.plot_psd(tmax=2., average=True, n_fft=1024,
spatial_colors=False)
plt.close('all')
# epoching
events = find_events(raw2, stim_channel='STI 014')
events[:, 2] = 1
assert len(events) > 2
epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True)
evoked = epochs.average()
assert_equal(evoked.nave, len(events) - 1)
# complex data
rng = np.random.RandomState(0)
data = rng.randn(1, 100) + 1j * rng.randn(1, 100)
raw = RawArray(data, create_info(1, 1000., 'eeg'))
assert_allclose(raw._data, data)
# Using digital montage to give MNI electrode coordinates
n_elec = 10
ts_size = 10000
Fs = 512.
ch_names = [str(i) for i in range(n_elec)]
ch_pos_loc = np.random.randint(60, size=(n_elec, 3)).tolist()
data = np.random.rand(n_elec, ts_size)
montage = make_dig_montage(
ch_pos=dict(zip(ch_names, ch_pos_loc)),
coord_frame='head'
)
info = create_info(ch_names, Fs, 'ecog')
raw = RawArray(data, info)
raw.set_montage(montage)
raw.plot_psd(average=False) # looking for nonexistent layout
raw.plot_psd_topo()
| bsd-3-clause |
bthirion/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 95 | 6971 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
astronomeara/xastropy-old | xastropy/phot/ian_phot.py | 7 | 46065 | """Python script for various photometry tasks. See especially
:func:`aperphot`, for basic aperture photometry. If you need
something fancier, try PyRAF, DAOPHOT, etc.
"""
# 2008-12-21 18:27 IJC: Created
from numpy import array, sign, pi, nan, arange
import numpy as np
import pdb
#try:
# import psyco
# psyco.full()
#except ImportError:
# print 'Psyco not installed, the program will just run slower'
class phot:
def __init__(self, **kw):
"""Generate a photometry object w/keywords and kw+'str' descriptive keywords
Inputs cannot be numpy arrays"""
# 2009-09-14 10:07 IJC: Created
keylist = ['time', 'phot', 'ephot', 'bg', 'ebg', 'aper', 'position', 'eposition', \
'ditherposition', 'object', 'filename', 'exptime', 'ra', 'dec']
defaults = dict(photunit=None)
for key in keylist:
defaults[key]=None
defaults[key+'str']=None
keylist = keylist + [key+'str']
for keyword in defaults:
if (not kw.has_key(keyword)):
kw[keyword] = defaults[keyword]
for k in kw.keys():
if kw[k].__class__==str:
exec('self.'+k+'="'+str(kw[k]))+'"'
else:
exec('self.'+k+'='+str(kw[k]))
self.comment = "Created by IJC's spitzer.phot"
return
def __str__(self): # Return info
lin= 'Phot: {} at pixel ({:.1f},{:.1f})\n'.format(self.object,
self.position[0],
self.position[1])
lin+= ' Analyzed frame {}\n'.format(self.filename)
lin+= ' Exposure time = {:.1f}s\n'.format(self.exptime)
lin+= ' Aperture = {}, {}, {}\n'.format(self.aper[0],
self.aper[1],self.aper[2])
#lin+= ' {}\n'.format(self.aperstr)
lin+= ' Counts/s = {} +/- {}\n'.format(self.phot,self.ephot)
return lin
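# Illustrative sketch (not part of the original module): populating a phot
# object by hand. All values below are hypothetical; note the class docstring's
# warning that inputs cannot be numpy arrays (use plain lists and floats).
def _example_phot_object():
    obs = phot(object='targetstar', filename='frame0001.fits', exptime=10.0,
               phot=1234.5, ephot=12.3, aper=[4, 8, 12],
               position=[101.2, 203.4])
    return str(obs)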
def estbg(im, mask=None, bins=None, plotalot=False, rout=(3,200), badval=nan):
"""Estimate the background value of a masked image via histogram fitting.
INPUTS:
im -- numpy array. Input image.
OPTIONAL INPUTS:
mask -- numpy array. logical mask, False/0 in regions to ignore
bins -- sequence. edges of bins to pass to HIST
plotalot -- bool. Plot the histogram and fit.
rout -- 2-tuple of (nsigma, niter) for analysis.removeoutliers.
Set to (Inf, 0) to not cut any outliers.
badval -- value returned when things go wrong.
OUTPUT:
b, s_b -- tuple of (background, error on background) from gaussian fit.
         Note that the error is analogous to the standard deviation of the mean
COMMENTS:
The fit parameters appear to be robust across a fairly wide range of bin sizes. """
# 2009-09-02 17:13 IJC: Created!
# 2009-09-04 15:07 IJC: Added RemoveOutliers option. Use only non-empty bins in fit.
# 2009-09-08 15:32 IJC: Error returned is now divided by sqrt(N) for SDOM
# 2009-11-03 00:16 IJC: Improved guess for gaussian dispersion
# 2011-05-18 11:47 IJMC: Moved (e)gaussian imports to analysis.
# 2012-01-01 21:04 IJMC: Added badval option
# 2012-08-15 17:45 IJMC: Numpy's new histogram no longer accepts 'new' keyword
# 2013-03-20 08:22 IJMC: Now works better even for small numbers
# of pixels; thanks to A. Weigel @
# ETH-Zurich for catching this!
from numpy import histogram, mean, median, sqrt, linspace, isfinite, ones,std
from pylab import find
from scipy import optimize
from xastropy.phot.ian_analysis import removeoutliers, egaussian, gaussian, stdr
if plotalot:
from pylab import figure, errorbar, plot, colorbar, title, hist, mean, std
#from analysis import imshow
def gaussianChiSquared(guess, x, y, err):
return (egaussian(guess, x, y, e=err)**2).sum()
if mask==None:
mask = ones(im.shape)
dat = im.ravel()[find(mask<>0)]
if plotalot:
figure(); plot(im.ravel()); plot(dat)
print mean(dat), std(dat), rout[0]*std(dat)
print len(dat), (abs(dat-mean(dat))<(rout[0]*std(dat))).sum()
figure(); plot(dat-mean(dat));
plot([0,len(dat)], [rout[0]*std(dat),rout[0]*std(dat)],'--k')
plot([0,len(dat)], [-rout[0]*std(dat),-rout[0]*std(dat)],'--k')
dat = removeoutliers(dat, rout[0], remove='both', center='mean', niter=rout[1], verbose=plotalot)
ndat = len(dat)
if ndat==0:
print "No data to work with!"
return (badval, badval)
if bins==None:
if plotalot: print "no bins entered!"
datmean = dat.mean()
datstd = stdr(dat, nsigma=3)
nunique = len(np.unique(dat.ravel()))
#pdb.set_trace()
if nunique > len(dat)/20.:
dobin = False
else:
dobin = True
bins = linspace(dat.min(), dat.max(), nunique/2)
if plotalot:
print "dat.mean, dat.std>>" + str((dat.mean(), dat.std()))
#if plotalot:
# figure(); hout = hist(dat[datIndex],bins)
#else:
if dobin:
binwidth = mean(bins[1::]-bins[:-1])
bincenter = 0.5*(bins[1::]+bins[:-1])
datIndex = (dat>=bins.min()) * (dat<=bins.max())
hout = histogram(dat[datIndex], bins) #,new=True)
gy = hout[0]
erry = sqrt(gy)
usableIndex = gy>0
eff_binwidth = mean(bins[usableIndex][1::]-bins[usableIndex][:-1])
guess = [gy.sum()*eff_binwidth, std(dat[datIndex]), median(dat[datIndex])]
if 1.0*usableIndex.sum()/usableIndex.size < 0.5:
out = guess
else:
out = optimize.fmin(gaussianChiSquared, guess, \
args=(bincenter[usableIndex],gy[usableIndex], erry[usableIndex]), \
disp=plotalot)
if plotalot:
from pylab import figure, errorbar, plot, colorbar, title
from nsdata import imshow
print 'guess>>',guess
print 'fit>>',out
figure()
imshow(im); colorbar()
figure()
errorbar(bincenter[usableIndex], gy[usableIndex], erry[usableIndex], fmt='ob')
plot(bincenter, gaussian(out, bincenter),'-r', linewidth=2)
title('Mean: %f, Std. Dev.: %f' % (out[2], out[1]))
ret = out[2], out[1]/sqrt(ndat)
else:
ret = datmean, datstd/sqrt(ndat)
return ret
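# Hedged usage sketch (synthetic frame, not from the original module): estimate
# the sky level of an image with a flat background of roughly 100 counts.
def _example_estbg():
    import numpy as np
    frame = 100.0 + np.random.randn(64, 64)
    bg, ebg = estbg(frame, plotalot=False)
    return bg, ebg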
def makemask(x,y,params, shape='circle'):
"""Generate a binary (logical) mask with given shape and location.
INPUTS:
      x = x-coordinate system (made with meshgrid)
      y = y-coordinate system (made with meshgrid)
params:
shape='circle':
params(1) = x-offset
params(2) = y-offset
params(3) = x-diameter
params(4) = OPTIONAL y-diameter
shape='quad':
params: list of quadrants to include in the mask. The
midpoint for determining quadrants will be
mid = (xmax+xmin)/2. Quadrants are:
0: x<midX and y<midY
1: x<midX and y>=midY
2: x>=midX and y<midY
3: x>=midX and y>=midY
OPTIONAL INPUTS:
      shape=:  desired mask shape.  Currently 'circle' and 'quad' are valid.
OUTPUTS:
      mask = NxM gridded representation of specified mask
where NxM are the size of the x,y input meshgrids
EXAMPLE:
x=arange(32); y=arange(64)
xx,yy = meshgrid(x,y)
m = makemask(xx,yy,(24, 53, 10))
m[53,24] # ----> True
"""
# 2009-09-02 10:47 IJC: Created. Adapted from Joe Green's MakeEllipse.m
# 2009-09-27 13:37 IJC: Added 'quad' quadrant mask option.
from numpy import zeros, bool
if not x.shape==y.shape:
print "x,y meshgrid coordinates must be the same shape! Exiting."
return -1
if shape=='circle':
if len(params)<3:
print "Must give at least 3 parameters to mask."
return -1
x0 = params[0]
y0 = params[1]
xdia =params[2]
if len(params)==3:
ydia = xdia
else:
ydia = params[3]
mask = ( (((x-x0)/(xdia/2.))**2 + ((y-y0)/(ydia/2.))**2) < 1 )
elif shape=='quad':
midx = (x.max()+x.min())/2.
midy = (y.max()+y.min())/2.
mask = zeros(x.shape, bool)
for ii in range(len(params)):
if params[ii]==0:
mask += (x<midx) * (y<midy)
elif params[ii]==1:
mask += (x<midx) * (y>=midy)
elif params[ii]==2:
mask += (x>=midx) * (y<midy)
elif params[ii]==3:
mask += (x>=midx) * (y>=midy)
return mask
def centroid(im, mask=None, w=None, x=None, y=None):
"""Compute the centroid of an image with a specified binary mask projected upon it.
INPUT:
im -- image array
mask -- binary mask, 0 in ignored regions and 1 in desired regions
w is typically 1.0/u**2, where u is the uncertainty on im
x,y are those generated by meshgrid.
OUTPUT:
(x0,y0) tuple of centroid location"""
from numpy import ones, arange, meshgrid
# 2009-09-02 13:35 IJC: Created
if mask==None:
mask = ones(im.shape)
if w==None:
w = ones(im.shape)
if not (im.shape==mask.shape and im.shape==w.shape):
print "Image, mask, and weights must have same shape! Exiting."
return -1
if x==None or y==None:
xx = arange(im.shape[1])
yy = arange(im.shape[0])
x,y = meshgrid(xx,yy)
x0 = (x*im*mask*w).sum()/(im*mask*w).sum()
y0 = (y*im*mask*w).sum()/(im*mask*w).sum()
return (x0,y0)
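# Hedged sketch (synthetic data): centroid of a small bright patch; the patch
# is placed near (x=41, y=31), so the returned centroid should be close to that.
def _example_centroid():
    import numpy as np
    frame = np.zeros((64, 64))
    frame[30:33, 40:43] = 100.0
    return centroid(frame)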
def lmcextinct(ra, dec, **kw):
"""Use the Zaritsky & Harris (ZH) map to get A_V extinction in the LMC.
INPUT:
ra -- decimal degrees of right ascension
dec -- decimal degrees of declination
OPTIONAL INPUT:
method='griddata' /'nearest' -- interpolation method
map='both' /'hot'/'cool' -- Which ZH map to use
hot='lmc_hotav.fits' -- filename of hot map
cool='lmc_coolav.fits' -- filename of cool map
null=0.0 -- "no data" value in map
verbose=False / True -- print out some spam and eggs
EXAMPLE:
ra = hms('05:51:56.6')
dec = dms('-66:25:08.5')
lmcextinct(ra, dec)
If their map returns null, interpolate from the surroundings.
Note that these calculations are definitely _not_ optimized.
SEE ALSO:
hms, dms"""
# 2009-02-15 22:11 IJC: Created and tested.
import astropy.io.fits as pyfits
from matplotlib.mlab import griddata
from pylab import meshgrid, arange, array, sqrt, cos, sin, arctan2, arcsin
ra = array([ra]).copy().ravel()
dec = array([dec]).copy().ravel()
defaults = dict(method='griddata', map='hot', hot='lmc_hotav_clean2.fits',
cool='lmc_coolav.fits', null=0.0, verbose=False)
for key in defaults:
if (not kw.has_key(key)):
kw[key] = defaults[key]
if kw['map']=='both':
kw['map'] = 'hot'
ret1 = lmcextinct(ra, dec, **kw)
kw['map'] = 'cool'
ret2 = lmcextinct(ra, dec, **kw)
return (ret1, ret2)
else:
map = kw[kw['map']]
verbose = kw['verbose']
avmap = pyfits.getdata(map)
avhdr = pyfits.getheader(map)
nx = avhdr['naxis1']
ny = avhdr['naxis2']
cx = avhdr['crpix1']
cy = avhdr['crpix2']
x0 = avhdr['crval1']
y0 = avhdr['crval2']
dx = avhdr['cd1_1']
dy = avhdr['cd2_2']
xx,yy = meshgrid(arange(nx),arange(ny))
goodind = (avmap<>kw['null'])
# I don't know how the following WCS calculation works, but it
# does -- thank you, Calabretta & Greisen 2002!
d2r = pi/180.
phi = arctan2( xx-cx+1, yy-cy+1) + pi
theta = arctan2(1./(d2r*dy), sqrt((yy-cy)**2 + (xx-cx)**2))
mapdec = arcsin(sin(theta)*sin(y0*d2r) - cos(theta)*cos(phi)*cos(y0*d2r))/d2r
mapra = arcsin(cos(theta) * sin(phi) / cos(mapdec*d2r))/d2r + x0
if kw['method']=='griddata':
ragood = mapra[goodind]
decgood = mapdec[goodind]
avgood = avmap[goodind]
if verbose:
print 'ra.shape>>' + str(ra.shape)
# TBD: Vectorize this calculation; an interpolative solution
# should exist.
avlist = []
for ii in range(len(ra)):
if verbose:
print 'ra, dec>>' + str((ra[ii], dec[ii]))
if kw['method']=='nearest':
distmap = (mapra-ra[ii])**2 + (mapdec-dec[ii])**2
# If multiple values are equally near, average them:
val = avmap[distmap==distmap.min()].mean()
avlist.append(val)
elif kw['method']=='griddata':
avlist.append(griddata(ragood, decgood, avgood, array([ra[ii]]), array([dec[ii]])))
else:
print "Invalid method specified!"
avlist.append(-1)
return avlist
def readogle(filename, **kw):
""" Read in an OGLE-II photometry map file and output the data.
Returns a 6-tuple with each element an array:
0 -- RA. Strings of Right Ascension values.
1 -- DEC. Strings of Declination values
2 -- x_ref. OGLE x-coordinate, pixels
3 -- y_ref. OGLE y-coordinate, pixels
4 -- v_mag. OGLE V magnitude
5 -- i_mag. OGLE I magnitude
If you like, use HMS and DMS to convert the RA and DEC values returned."""
# 2008-12-21 18:53 IJC: Created
f = open(filename, 'r')
raw = f.readlines()
f.close()
nstars = len(raw)
raw2 = array([line.split() for line in raw])
ra = raw2[:,1]
dec = raw2[:,2]
xref = raw2[:,3]
yref = raw2[:,4]
vmag = raw2[:,5]
imag = raw2[:,7]
xref = [map(float, [x]) for x in xref]
yref = [map(float, [y]) for y in yref]
vmag = [map(float, [v]) for v in vmag]
imag = [map(float, [i]) for i in imag]
return (ra, dec, xref, yref, vmag, imag)
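# Hedged sketch (hypothetical filename): read an OGLE-II map and convert the
# first RA/DEC pair to decimal degrees with hms/dms, as the docstring suggests.
def _example_readogle():
    ra, dec, xref, yref, vmag, imag = readogle('ogle2_map.dat')
    return hms(ra[0]), dms(dec[0])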
def hms(d, delim=':'):
"""Convert hours, minutes, seconds to decimal degrees, and back.
EXAMPLES:
hms('15:15:32.8')
hms([7, 49])
hms(18.235097)
Also works for negative values.
SEE ALSO: dms
"""
# 2008-12-22 00:40 IJC: Created
# 2009-02-16 14:07 IJC: Works with spaced or colon-ed delimiters
from numpy import sign
if d.__class__==str or hasattr(d, '__iter__'): # must be HMS
if d.__class__==str:
d = d.split(delim)
if len(d)==1:
d = d[0].split(' ')
        if (len(d)==1) and (d[0].find('h')>-1):
            d = d[0].replace('h',delim).replace('m',delim).replace('s','')
            d = d.split(delim)
s = sign(float(d[0]))
if s==0: s=1
degval = float(d[0])*15.0
if len(d)>=2:
degval = degval + s*float(d[1])/4.0
if len(d)==3:
degval = degval + s*float(d[2])/240.0
return degval
else: # must be decimal degrees
hour = int(d/15.0)
d = abs(d)
min = int((d-hour*15.0)*4.0)
sec = (d-hour*15.0-min/4.0)*240.0
return [hour, min, sec]
def dms(d, delim=':'):
"""Convert degrees, minutes, seconds to decimal degrees, and back.
EXAMPLES:
dms('150:15:32.8')
dms([7, 49])
dms(18.235097)
Also works for negative values.
SEE ALSO: hms
"""
# 2008-12-22 00:40 IJC: Created
# 2009-02-16 14:07 IJC: Works with spaced or colon-ed delimiters
from numpy import sign
if d.__class__==str or hasattr(d, '__iter__'): # must be HMS
if d.__class__==str:
d = d.split(delim)
if len(d)==1:
d = d[0].split(' ')
s = sign(float(d[0]))
if s==0: s=1
degval = float(d[0])
if len(d)>=2:
degval = degval + s*float(d[1])/60.0
if len(d)==3:
degval = degval + s*float(d[2])/3600.0
return degval
else: # must be decimal degrees
if d<0:
sgn = -1
else:
sgn = +1
d = abs(d)
deg = int(d)
min = int((d-deg)*60.0)
sec = (d-deg-min/60.0)*3600.0
return [sgn*deg, min, sec]
def hess(color, mag, binsize, **kw):
"""Compute a hess diagram (surface-density CMD) on photometry data.
INPUT:
color
mag
binsize -- width of bins, in magnitudes
OPTIONAL INPUT:
cbin= -- set the centers of the color bins
mbin= -- set the centers of the magnitude bins
OUTPUT:
A 3-tuple consisting of:
Cbin -- the centers of the color bins
Mbin -- the centers of the magnitude bins
Hess -- The Hess diagram array"""
# cbin = out[0]
# mbin = out[1]
# imshow(out[2])
# yticks(range(0, len(mbin), 4), mbin[range(0,len(mbin),4)])
# xticks(range(0, len(cbin), 4), cbin[range(0,len(cbin),4)])
# ylim([ylim()[1], ylim()[0]])
# 2009-02-08 23:01 IJC: Created, on a whim, for LMC data (of course)
# 2009-02-21 15:45 IJC: Updated with cbin, mbin options
from numpy import arange, zeros
defaults = dict(mbin=None, cbin=None, verbose=False)
for key in defaults:
if (not kw.has_key(key)):
kw[key] = defaults[key]
if kw['mbin']==None:
mbin = arange(mag.min(), mag.max(), binsize)
else:
mbin = array(kw['mbin']).copy()
if kw['cbin']==None:
cbin = arange(color.min(), color.max(), binsize)
else:
cbin = array(kw['cbin']).copy()
hess = zeros((len(mbin), len(cbin)), float)
for ii in range(len(cbin)):
cindex = (color<(cbin[ii]+binsize/2)) * (color>(cbin[ii]-binsize/2))
for jj in range(len(mbin)):
index = cindex * (mag<(mbin[jj]+binsize/2)) * (mag>(mbin[jj]-binsize/2))
hess[jj,ii] = index.sum()
return (cbin, mbin, hess)
def snr(mag=20, itime=1., read=24.5, sky=8.43, npix=24., zero=26.44, dark=0.0):
"""Calculate SNR of a photometric CCD observation of given parameters.
The default keyword parameters are for the CTIO Blanco MOSAIC
imager, operating in V band.
"""
# 2009-02-20 14:40 IJC: Initiated
star = itime * 10**(0.4*(zero-mag))
noise = npix * (itime*(sky+dark)+read**2)
return star * (star+noise)**-0.5
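# Worked example (illustrative values): S/N of a V=20 point source in a 60 s
# exposure, using the default CTIO Blanco MOSAIC V-band parameters above.
def _example_snr():
    return snr(mag=20., itime=60.)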
#def sens(texp, mag, npix, tdead=15., rn=8., csky=12., zp=22.8, z=1.0,h=1280., d=1.0):
def sens(texp, mag, lin=3.8e5, tdead=15., rn=8., csky=12., zp=22.8, z=1.0,h=1280., d=1.0):
""" Calculate sensitiviy for various exposure times.
texp -- 1D array of times
mag -- target magnitude
    npix -- number of pixels used; computed internally and returned (not an input)
lin -- linearity limit of the detector (ADU)
tdead -- detector dead time (reset + readout) between integrations
rn -- read noise, in ADU
csky -- sky background per second, in ADU
zp -- photometric zero point (magnitude for 1 ADU/sec)
z -- airmass
h -- observatory altitude, in meters
d -- telescope mirror diameter, in meters
TBD: Flat-field noise!
    For now, I impose an artificial floor on the number of pixels
    used -- the light is never spread over fewer than 8 pixels.
"""
# 2009-03-13 11:48 IJC: Written based on http://arxiv.org/abs/0903.2139
from numpy import exp, log10, sqrt, floor
texp = array(texp).copy()
ctarg = 10**(0.4*(zp-mag))
Ntarg = ctarg * texp
npix = floor(Ntarg / (lin-csky)) # spread out the light
pixind = npix<8
npix[pixind] = 8
Nsky = csky * texp * npix
sScint = 0.004 * d**(-2./3.) * z**1.75 * exp(-h/8e3) * (2*texp)**-0.5
sTarg = -2.5*log10((Ntarg - sqrt(Ntarg)) / Ntarg)
sSky = -2.5*log10((Ntarg - sqrt(Nsky)) / Ntarg)
sRON = -2.5*log10((Ntarg - rn*sqrt(npix)) / Ntarg)
sTotal = sqrt(sScint**2 + sSky**2 + sRON**2)
snrpertime = sTotal * sqrt(texp+tdead)
return sTotal, snrpertime, npix
def subreg(fn, center=None, dim=None, verbose=False):
"""Load a subsection of a list of 2D FITS files.
INPUTS:
fn -- (str) filename or (list) list of filenames to load
center -- (2-tuple) (x0,y0) center of region to load.
dim -- (2-tuple) (dx,dy) sizes of region to load.
OUTPUT:
region -- (array) (N x dx x dy)-sized stack of subregions
NOTE: center can also be a list of tuples (1 per file), but dim cannot."""
# 2009-09-24 11:20 IJC: Created
print "Function subreg is deprecated; please use subreg2 (which is much faster!)"
import astropy.io.fits as pyfits
#import pyfits
from numpy import zeros, nan, concatenate, int, float, max, min
if not hasattr(fn, '__iter__'):
fn = [fn]
if not hasattr(dim, '__iter__'):
dim = [dim, dim]
nfiles = len(fn)
if (center is None) and (dim is None):
try:
temp = pyfits.getdata(fn[0], ignore_missing_end=True)
except:
temp = pyfits.getdata(fn[0])
dosubreg = False
dim = list(temp.shape)
center = dim[0]/2, dim[1]/2
if not hasattr(center[0], '__iter__'):
center = [center]*nfiles
stack = zeros((0, dim[0], dim[1]), float)
dim = [dim]*nfiles
if verbose: print "fn, center, dim",(fn,center,dim)
iter = 0
for file, cen, sz in zip(fn, center, dim):
if verbose: print "file, cen, sz", (file,cen,sz)
try:
temp = pyfits.getdata(file, ignore_missing_end=True)
except:
temp = pyfits.getdata(file)
if verbose: print "file "+file+" shape is: "+str(temp.shape)
if len(temp.shape)<>2:
print "File %s did not return a 2D FITS file! Using nan." % file
            subreg = zeros((1,sz[0],sz[1]))+nan
else:
temp = temp.reshape((1,)+temp.shape)
xstartval = max([0,cen[0] - (sz[0]-1)/2.]).astype(int)
xendval = min([temp.shape[1], 1+cen[0] + (sz[0]-1)/2.]).astype(int)
ystartval = max([0,cen[1] - (sz[1]-1)/2.]).astype(int)
yendval = min([temp.shape[2], 1+cen[1] + (sz[1]-1)/2.]).astype(int)
if verbose: print "subregion limits: ", xstartval,xendval,ystartval,yendval
subreg = temp[:,xstartval:xendval,ystartval:yendval]
if file==fn[0] and stack.shape[1::]<>subreg.shape:
stack = zeros((0,)+subreg.shape[1::],float)
if verbose:
print "stack.shape>>", stack.shape
print "subreg.shape>>", subreg.shape
stack=concatenate((stack,subreg),0)
iter += 1
return stack
def subreg2(fn, center=None, dim=None, verbose=False):
"""Load a subsection of a list of 2D FITS files.
INPUTS:
fn -- str, list of str, or 2- or 3D Numpy array.
i) filename to load, OR
ii) list of filenames to load, OR
iii) Numpy array of data, already loaded.
center -- (2-tuple) (x0,y0) center of region to load.
dim -- (2-tuple) (dx,dy) sizes of region to load.
OUTPUT:
region -- (array)
(N x dx x dy)-sized stack of subregions. Note that this will
always be 3D!
NOTE: center can also be a list of tuples (1 per file), but dim cannot."""
# 2009-09-24 11:20 IJC: Created
# 2011-12-15 15:53 IJMC: Updated to use memmap. MUCH faster!
# 2012-06-16 06:57 IJMC: Now 'fn' can also be a Numpy array.
import astropy.io.fits as pyfits
#import pyfits
from numpy import zeros, nan, concatenate, int, float, max, min
if not hasattr(fn, '__iter__'):
fn = [fn]
elif isinstance(fn, np.ndarray):
if fn.ndim==3:
nfiles = fn.shape[0]
elif fn.ndim==2:
fn = fn.reshape((1,)+fn.shape)
nfiles = len(fn)
if dim is not None and (not hasattr(dim, '__iter__')):
dim = [dim, dim]
if center is not None and (not hasattr(center[0], '__iter__')):
center = [center]*nfiles
if (center is None) and (dim is None):
try:
temp = pyfits.getdata(fn[0], ignore_missing_end=True)
except:
temp = pyfits.getdata(fn[0])
dosubreg = False
dim = list(temp.shape)
center = dim[0]/2, dim[1]/2
stack = zeros((0, dim[0], dim[1]), float)
dim = [dim]*nfiles
#center = [center]*nfiles
if verbose: print "fn, center, dim",(fn,center,dim)
iter = 0
for file, cen, sz in zip(fn, center, dim):
if verbose: print "file, cen, sz", (file,cen,sz)
if isinstance(file, np.ndarray):
f = file
else:
try:
f = pyfits.open(file, ignore_missing_end=True, memmap=True)
except:
f = pyfits.open(file, memmap=True)
#if verbose: print "file "+file+" shape is: "+str(temp.shape)
#if len(temp.shape)<>2:
# print "File %s did not return a 2D FITS file! Using nan." % file
# subreg = zeros((1,dx,dy))+nan
#else:
if iter==0:
if isinstance(f, np.ndarray):
temp = f.reshape((1,) + f.shape)
else:
temp = f[0].data#.reshape([1]+dim[0])
temp = temp.reshape((1,) + temp.shape)
xstartval = max([0,cen[0] - (sz[0]-1)/2.]).astype(int)
xendval = min([temp.shape[1], 1+cen[0] + (sz[0]-1)/2.]).astype(int)
ystartval = max([0,cen[1] - (sz[1]-1)/2.]).astype(int)
yendval = min([temp.shape[2], 1+cen[1] + (sz[1]-1)/2.]).astype(int)
if verbose: print "subregion limits: ", xstartval,xendval,ystartval,yendval
subreg = temp[0,xstartval:xendval,ystartval:yendval]
stack = zeros((nfiles,) + subreg.shape, float)
else:
this_dy = center[0][0] - center[iter][0]
this_dx = center[0][1] - center[iter][1]
try:
subreg = f[0].data[xstartval+this_dx:xendval+this_dx, \
ystartval+this_dy:yendval+this_dy]
except IndexError:
print "Likely invalid center position for Frame %i (%s): %s." % \
(iter, file, str(center[iter]))
print "Using offsets for Frame 0 (%s) instead." % str(center[0])
subreg = f[0].data[xstartval:xendval, ystartval:yendval]
except:
print "Some error occurred. Using offsets for Frame 0 (%s) instead." % \
str(center[0])
subreg = f[0].data[xstartval:xendval, ystartval:yendval]
stack[iter] = subreg
iter += 1
return stack
def aperphot(fn, timekey=None, pos=[0,0], dap=[2,4,6], mask=None, verbose=False, nanval=999, resamp=None, retfull=False):
"""Do aperture photometry on a specified file.
:INPUTS:
pos : 2-sequence
center of apertures (as if indexing: fn[pos[0], pos[1]])
dap : 3-sequence
Photometry aperture DIAMETERS:
-- target aperture (within which to sum flux)
-- inner sky aperture
-- outer sky aperture
resamp : int
Factor by which to interpolate frame before measuring photometry
(in essence, does partial-pixel aperture photometry)
Aperture masking:
If no mask is passed in, use the star's input position and
aperture diameters to create binary pixel masks for stellar and
background photometry. If a mask is passed in, stellar and
background aperture masks are generated from where all input
mask elements equal 1 and 2, respectively.
retfull:
Also return arrays of target mask, sky mask, and frame used.
This option is a memory hog!
:OUTPUTS:
:class:`phot` object.
:EXAMPLE:
::
import astropy.io.fits
from astropy import wcs
import numpy as np
from phot import aperphot
img='62_z_CDFs_goods_stamp_img.fits' #path to the image
RA = 52.9898239
DEC = -27.7143114
hdulist = astropy.io.fits.open(img)
w = wcs.WCS(hdulist['PRIMARY'].header)
world = np.array([[RA, DEC]])
pix = w.wcs_world2pix(world,1) # Pixel coordinates of (RA, DEC)
print "Pixel Coordinates: ", pix[0,0], pix[0,1]
#call aperture function
observation=aperphot(img, timekey=None, pos=[pix[0,0], pix[0,1]], dap=[4,8,12], resamp=2, retfull=False)
# Print outputs
print "Aperture flux:", observation.phot
print "Background: ", observation.bg
:REQUIREMENTS:
scipy.interpolate, pyfits, numpy...
"""
# 2009-09-14 10:49 IJC: Created
# 2010-01-15 14:20 IJC: Added numpy "_string" check
# 2011-12-29 12:01 IJMC: Added peak pixel values to photometry report.
# 2012-01-25 11:26 IJMC: Adding "resamp" option -- thanks to
# K. Stevenson and J. Harrington of UCF for
# the suggestion.
# 2012-02-26 11:53 IJMC: Now return 'ntarg' and 'nsky' -- number of pixels used.
# 2012-06-07 08:27 IJMC: 'peak' values are now corrected for the
# resampling factor.
# 2012-07-03 10:35 IJMC: Fixed a key bug: frames were not
# correctly background-subtracted when
# applying partial-pixel resampling.
# 2012-10-19 13:41 IJMC: Documented 'retfull' option; changed default.
# 2013-03-20 09:21 IJMC: More error-checking for saving header
# keywords. Thanks to A. Weigel @
# ETH-Zurich for catching this!
# 2014-05-06 13:12 JXP: Eliminate fixval calls
from numpy import meshgrid, median,isfinite,sort,ndarray,string_
import numpy as np
import astropy.io.fits as pyfits
#import pyfits
#from xastropy.phot.ian_analysis import fixval
from os import path
from scipy import interpolate
thisobs = phot()
x0, y0 = pos
dap_targ, dap_skyinner, dap_skyouter = dap
if resamp is None or resamp<1:
resamp = 1
else:
resamp = float(resamp)
# Determine size:
if isinstance(fn,str):
nx = pyfits.getval(fn, 'NAXIS1')
ny = pyfits.getval(fn, 'NAXIS2')
elif isinstance(fn,ndarray):
nx,ny = fn.shape
nx0, ny0 = nx, ny
nx = ((nx - 1)*resamp + 1.) # Avoid resampling at pixel locations
ny = ((ny - 1)*resamp + 1.) # outside the original boundaries.
# Generate or load masks:
if mask==None:
xx,yy = meshgrid(np.arange(nx)/resamp, np.arange(ny)/resamp) # JXP 2014 May 6
#xx,yy = meshgrid(np.arange(ny)/resamp, np.arange(nx)/resamp)
mask_targ = makemask(xx, yy, (x0, y0, dap_targ))
mask_s1 = makemask(xx, yy, (x0,y0, dap_skyinner))
mask_s2 = makemask(xx, yy, (x0,y0, dap_skyouter))
mask_sky = mask_s2 - mask_s1
else:
mask_targ = mask==1
mask_sky = mask==2
if resamp>1:
print "In aperphot, resamp>1 and user-specified mask passed in... beware!"
# Load data frame:
thisobs = phot()
if isinstance(fn,ndarray):
frame = fn
elif isinstance(fn, str) or isinstance(fn,string_):
if not path.isfile(fn):
print "file %s not found! exiting..." % fn
return thisobs
frame = pyfits.getdata(fn)
#fixval(frame, nanval)
# Resample data frame
if resamp>1:
frame0 = frame.copy()
xx0 = range(nx0)
yy0 = range(ny0)
x1,y1 = np.arange(nx)/resamp, np.arange(ny)/resamp
rectspline = interpolate.fitpack2.RectBivariateSpline(xx0, yy0, frame0, kx=1, ky=1, s=0)
frame = rectspline(x1, y1)
#from pylab import *
# Measure background and aperture photometry
thisbg, thisebg = estbg(frame, mask=mask_sky, plotalot=verbose, rout=[3,99])
thisphot = (mask_targ*(frame - thisbg)).sum() /resamp/resamp
peak = frame.max()
peak_targ = (mask_targ * frame).max()
peak_annulus = (mask_sky * frame).max()
#pdb.set_trace()
thisobs.bg=thisbg
thisobs.ebg=thisebg
thisobs.bgstr='phot.estbg: SDOM on bg histogram mean & dispersion after outlier rejection'
thisobs.phot=thisphot
thisobs.photstr='by-hand background-subtracted aperture photometry'
thisobs.ntarg = mask_targ.sum()/resamp/resamp
thisobs.nsky = mask_sky.sum()/resamp/resamp
#pdb.set_trace()
# Simple stats :: JXP 2014 May 6
var = thisphot + np.sqrt(thisobs.nsky)*thisobs.bg
thisobs.ephot = np.sqrt(var)
thisobs.peak = peak
thisobs.peak_targ = peak_targ
thisobs.peak_annulus = peak_annulus
thisobs.peakstr = 'peak pixel value in frame'
thisobs.peak_targstr = 'peak pixel value in target aperture'
thisobs.peak_annulusstr = 'peak pixel value in sky annulus'
thisobs.position = pos
thisobs.positionstr = 'user-specified, zero-indexed pixel coordinates.'
if isinstance(fn, str):
header = pyfits.getheader(fn)
if not timekey==None:
if timekey in header:
                thisobs.time=header[timekey]
thisobs.timestr='heliocentric modified julian date'
if 'object' in header: thisobs.object = header['object']
if 'exptime' in header: thisobs.exptime = header['exptime']
thisobs.aper = dap
thisobs.aperstr = 'target, inner, outer aperture diameters, in pixels.'
thisobs.filename=fn
thisobs.resamp = resamp
if retfull:
thisobs.mask_targ = mask_targ
thisobs.mask_sky = mask_sky
thisobs.frame = frame
if verbose:
from pylab import figure, colorbar
from nsdata import imshow
figure(); imshow(frame*mask_targ); colorbar()
figure(); imshow(frame*mask_sky); colorbar()
return thisobs
def psffiterr(xyoffset, psf, frame, w=None, scale=100, dframe=9, verbose=False):
""" Usage:
optimize.fmin_powell(psffiterr, [0,0], args=(psf, stack[6],goodind[6],100,13),xtol=0.5,ftol=1)
"""
# 2009-11-13 11:27 IJC: Created
from numpy import round
import pdb
out = psffit(psf, frame, loc=None, w=w, scale=scale, dframe=dframe, \
xoffs=[round(xyoffset[0])], yoffs=[round(xyoffset[1])], verbose=verbose)
sumsqres = out[2][0,0]
#pdb.set_trace()
return sumsqres
def psffit(psf, frame, loc=None, w=None, scale=100, dframe=9, xoffs=range(0,100,10), yoffs=range(0,100,10), verbose=False):
"""
INPUT:
psf -- model PSF (supersampled by 'scale')
frame -- science frame to which psf will be fit. best if it's odd-sized
OPTIONAL INPUTS:
loc -- (x,y) integers, star location in data frame (e.g., data[x,y])
w -- [array] weights of pixels in data frame, (typ. 1/sigma^2)
scale -- supersampling level of PSF
dframe -- [odd int] diameter of square box around target location
xoffs -- [int array] subpixel x offsets to test.
yoffs -- [int array] subpixel y offsets to test.
"""
# 2009-10-07 14:18 IJC: Created.
# 2011-05-11 22:16 IJC: Added a try/except/pdb debugging step
from numpy import arange, prod, zeros,vstack,dot, nonzero, diag,floor,abs,max,hstack
from numpy.linalg import pinv
from xastropy.phot.ian_analysis import binarray
from time import time
import pdb
tic = time()
if w==None:
w = zeros(frame.shape)+1
if loc==None:
loc = ((frame.shape[1]-1)/2,(frame.shape[0]-1)/2)
if xoffs==None:
        xoffs = arange(scale)
if yoffs==None:
yoffs = arange(scale) #arange(scale)
ycen = int(loc[0])
xcen = int(loc[1])
pycen, pxcen = nonzero(psf==psf.max())
# Pick a thumbnail frame size to use, and cut it out of the larger frame
if verbose:
print "frame.shape>>", frame.shape
print "(xcen, ycen, dframe)>>", xcen,ycen,dframe
print "limits>>" , (ycen-(dframe-1)/2.), (ycen+(dframe+1)/2.), (xcen-(dframe-1)/2.), (xcen+(dframe+1)/2.)
print "xoffs, yoffs>>", xoffs, yoffs
data = frame[(ycen-(dframe-1)/2.):(ycen+(dframe+1)/2.),
(xcen-(dframe-1)/2.):(xcen+(dframe+1)/2.)]
weights = w[(ycen-(dframe-1)/2.):(ycen+(dframe+1)/2.),
(xcen-(dframe-1)/2.):(xcen+(dframe+1)/2.)]
wmat = diag(weights.ravel())
extrasize = 2*max(abs(floor(1.0*hstack((xoffs,yoffs))/scale)))
exs = extrasize*scale/2
if verbose: print "extrasize>> ",extrasize
# Determine how much of the PSF to cut out, and cut it out
dpsf0 = (dframe+1)*scale-1
dpsf1 = dframe*scale-1
pxmin = int(pxcen-(dpsf0-1)/2-exs)
pxmax = int(pxcen+(dpsf0+1)/2+exs)
pymin = int(pycen-(dpsf0-1)/2-exs)
pymax = int(pycen+(dpsf0+1)/2+exs)
if verbose: print psf.shape, pymin, pymax, pxmin, pxmax
smpsf = psf[pymin:pymax, pxmin:pxmax]
if verbose: print "data.shape>>" , data.shape
ndata = prod(data.shape)
if verbose: print "ndata>> %i" % ndata
const = zeros(ndata,float)+1
background = zeros((len(xoffs),len(yoffs)), float)
fluxscale = zeros((len(xoffs),len(yoffs)), float)
chisq = zeros((len(xoffs),len(yoffs)), float)
if verbose:
print "wmat.shape>>", wmat.shape
print "data.ravel().shape>>", data.ravel().shape
wpmat = dot(wmat, data.ravel()) # outside of loop for efficiency
dfs = dframe * scale # outside of loop for efficiency
initoffset_min = scale - 1 + exs # outside of loop for efficiency
initoffset_max = scale - 1 + exs + dfs # outside of loop for efficiency
nx = len(xoffs)
ny = len(yoffs)
rangeny = range(ny)
for ii in range(nx):
xoffset = xoffs[ii]
xmin, xmax = int(initoffset_min-xoffset), int(initoffset_max-xoffset)
for jj in rangeny:
# Cut out the correct portion of the model PSF
yoffset = yoffs[jj]
ymin, ymax = int(initoffset_min-yoffset), int(initoffset_max-yoffset)
# Bin down the PSF by the correct factor. Sizes should now match!
binpsf = binarray(smpsf[ymin:ymax, xmin:xmax],scale)
# Compute the best-fit background & PSF scaling factor
if verbose:
print "xmat shapes>> ",const.shape, binpsf.ravel().shape
print "ymin,ymax,xmin,xmax>> ",ymin,ymax, xmin,xmax
print "binpsf.shape>> ",binpsf.shape
try:
xmat = vstack((const,binpsf.ravel())).transpose()
wxmat = dot(wmat,xmat)
fitvec = dot(pinv(wxmat), wpmat)
background[ii,jj], fluxscale[ii,jj] = fitvec
chisq[ii,jj] = ((dot(wxmat, fitvec) - wpmat)**2).sum()
except:
print "error occurred"
chisq[ii,jj] = 9e99
pdb.set_trace()
ii,jj = nonzero(chisq==chisq.min())
if len(ii)>1: # more than one chisquared minimum found!
ii = ii[0]
jj = jj[0]
xoffset = xoffs[ii]
xmin, xmax = int(scale-xoffset-1+exs), int(scale-xoffset-1 + dfs+exs)
yoffset = yoffs[jj]
ymin, ymax = int(scale-yoffset-1+exs), int(scale-yoffset-1 + dfs+exs)
# Bin down the PSF by the correct factor. Sizes should now match!
binpsf = binarray(smpsf[ymin:ymax, xmin:xmax],scale)
modelpsf = background[ii,jj]+fluxscale[ii,jj]*binpsf
#print "%f seconds for completion!" % (time()-tic)
return modelpsf, data, chisq, background, fluxscale, xoffset, yoffset, chisq[ii,jj],background[ii,jj], fluxscale[ii,jj]
def prffit(prf, frame, loc=None, w=None, scale=100, dframe=9, xoffs=range(0,100,10), yoffs=range(0,100,10), verbose=False):
"""
INPUT:
prf -- model PRF (supersampled by 'scale')
frame -- science frame to which psf will be fit. best if it's odd-sized
OPTIONAL INPUTS:
loc -- (x,y) integers, star location in data frame (e.g., data[x,y])
w -- [array] weights of pixels in data frame, (typ. 1/sigma^2)
scale -- supersampling level of PSF
dframe -- [odd int] diameter of square box around target location
xoffs -- [int array] subpixel x offsets to test.
yoffs -- [int array] subpixel y offsets to test.
"""
# 2009-10-07 14:18 IJC: Created.
from numpy import arange, prod, zeros,vstack,dot, nonzero, diag,floor,abs,max,hstack
from numpy.linalg import pinv
from xastropy.phot.ian_analysis import binarray
from time import time
tic = time()
if w==None:
w = zeros(frame.shape)+1
if loc==None:
loc = ((frame.shape[1]-1)/2,(frame.shape[0]-1)/2)
if xoffs==None:
        xoffs = arange(scale)
if yoffs==None:
yoffs = arange(scale) #arange(scale)
ycen = int(loc[0])
xcen = int(loc[1])
pycen, pxcen = nonzero(prf==prf.max())
# Pick a thumbnail frame size to use, and cut it out of the larger frame
if verbose:
print "frame.shape>>", frame.shape
print "(xcen, ycen, dframe)>>", xcen,ycen,dframe
print "limits>>" , (ycen-(dframe-1)/2.), (ycen+(dframe+1)/2.), (xcen-(dframe-1)/2.), (xcen+(dframe+1)/2.)
data = frame[(ycen-(dframe-1)/2.):(ycen+(dframe+1)/2.),
(xcen-(dframe-1)/2.):(xcen+(dframe+1)/2.)]
weights = w[(ycen-(dframe-1)/2.):(ycen+(dframe+1)/2.),
(xcen-(dframe-1)/2.):(xcen+(dframe+1)/2.)]
wmat = diag(weights.ravel())
extrasize = 2*max(abs(floor(1.0*hstack((xoffs,yoffs))/scale)))
exs = extrasize*scale/2
if verbose: print "extrasize>> ",extrasize
if verbose: print "data.shape>>" , data.shape
ndata = prod(data.shape)
if verbose: print "ndata>> %i" % ndata
const = zeros(ndata,float)+1
background = zeros((len(xoffs),len(yoffs)), float)
fluxscale = zeros((len(xoffs),len(yoffs)), float)
chisq = zeros((len(xoffs),len(yoffs)), float)
if verbose:
print "wmat.shape>>", wmat.shape
print "data.ravel().shape>>", data.ravel().shape
wpmat = dot(wmat, data.ravel()) # outside of loop for efficiency
dfs = dframe * scale # outside of loop for efficiency
initoffset_min = scale - 1 + exs # outside of loop for efficiency
initoffset_max = scale - 1 + exs + dfs # outside of loop for efficiency
nx = len(xoffs)
ny = len(yoffs)
rangeny = range(ny)
for ii in range(nx):
xoffset = xoffs[ii]
xmin, xmax = int(initoffset_min-xoffset), int(initoffset_max-xoffset)
for jj in rangeny:
# Extract the correct indices of the model PRF. Sizes should now match!
yoffset = yoffs[jj]
ymin, ymax = int(initoffset_min-yoffset), int(initoffset_max-yoffset)
            # Sample the supersampled PRF at the native pixel stride so its
            # size matches the data thumbnail (dframe x dframe).
            binpsf = prf[ymin:ymax:scale, xmin:xmax:scale]
# Compute the best-fit background & PSF scaling factor
if verbose and (ii==0) and (jj==0):
print "xmat shapes>> ",const.shape, binpsf.ravel().shape
print "ymin,ymax,xmin,xmax>> ",ymin,ymax, xmin,xmax
print "binpsf.shape>> ",binpsf.shape
xmat = vstack((const,binpsf.ravel())).transpose()
wxmat = dot(wmat,xmat)
fitvec = dot(pinv(wxmat), wpmat)
background[ii,jj], fluxscale[ii,jj] = fitvec
chisq[ii,jj] = ((dot(wxmat, fitvec) - wpmat)**2).sum()
ii,jj = nonzero(chisq==chisq.min())
if len(ii)>1: # more than one chisquared minimum found!
ii = ii[0]
jj = jj[0]
xoffset = xoffs[ii]
xmin, xmax = int(scale-xoffset-1+exs), int(scale-xoffset-1 + dfs+exs)
yoffset = yoffs[jj]
ymin, ymax = int(scale-yoffset-1+exs), int(scale-yoffset-1 + dfs+exs)
    # Sample the PRF at the best-fit offset; sizes should now match the data.
    binpsf = prf[ymin:ymax:scale, xmin:xmax:scale]
modelpsf = background[ii,jj]+fluxscale[ii,jj]*binpsf
print "%f seconds for completion!" % (time()-tic)
return modelpsf, data, chisq, background, fluxscale, xoffset, yoffset, chisq[ii,jj],background[ii,jj], fluxscale[ii,jj]
def gauss2d(param, x, y):
""" Compute a gaussian distribution at the points x, y.
p is a three- or four-component array, list, or tuple:
z = [p4 +] p0/(p1*sqrt(4pi)) * exp(-r**2 / (2*p1**2))
where r = sqrt((x-p2)**2 + (y-p3)**2)
p[0] -- Volume under the gaussian
p[1] -- one-sigma dispersion
p[2] -- X- offset of center
p[3] -- Y- offset of center
p[4] -- optional constant, vertical offset
x & y must be equal-sized arrays from numpy.meshgrid
NOTE: FWHM = 2*sqrt(2*ln(2)) * p1 ~ 2.3548*p1
SEE ALSO: egauss2d, numpy.meshgrid"""
#2010-01-11 22:46 IJC: Created
from numpy import array, abs, concatenate, exp
x = array(x, dtype=float).copy()
y = array(y, dtype=float).copy()
p = array(param).copy()
r = abs((x-p[2]) + 1j*(y-p[3]))
if len(p)==4:
p = concatenate((p, [0]))
z = p[4] + p[0]/(p[1]*4*pi) * exp(-r**2 / (2*p[1]**2))
return z
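# Minimal sketch (synthetic grid; parameter values are illustrative): evaluate
# a gaussian of volume 1000 and sigma 2 pixels centered at (16, 16).
def _example_gauss2d():
    from numpy import meshgrid, arange
    xx, yy = meshgrid(arange(32), arange(32))
    return gauss2d([1000., 2., 16., 16.], xx, yy)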
def egauss2d(param, x, y, z, ez=None):
""" Return the chi-squared error on a 2D gaussian fit. See gauss2d
for more details on the input parameters.
z is the array of data to be fit
ez is an optional array of one-sigma errors to the data in z.
"""
# 2010-01-11 22:59 IJC: Created
from numpy import array, float, ones
x = array(x, dtype=float).copy()
y = array(y, dtype=float).copy()
z = array(z, dtype=float).copy()
p = array(param).copy()
if ez==None:
ez = ones(z.shape,float)
else:
ez = array(ez, dtype=float).copy()
model = gauss2d(param,x,y)
chisq = (((z-model)/ez)**2).sum()
return chisq
| bsd-3-clause |
schae234/Camoco | tests/test_COB.py | 2 | 5362 | '''
COB Tests
'''
import camoco as co
from camoco import cf
import random
import itertools
from collections import Counter
import pytest
import pandas as pd
import numpy as np
from scipy.misc import comb
def test_coordination_between_expr_and_expr_index(testCOB):
for i,x in enumerate(testCOB._expr.index):
assert i == testCOB._expr_index[x]
def test_coordination_between_expr_index_and_coex_index(testCOB):
assert True
return
assert set(itertools.chain(*testCOB.coex.index.values)) \
== set(testCOB._expr.index.values)
def test_expr_nans_in_same_place(testCOB):
pass
def test_shape(testCOB):
assert len(testCOB.coex) == comb(testCOB.num_genes(), 2)
def test_coex_score_concordance(testCOB):
for a, b in itertools.combinations(
[testCOB.refgen.random_gene() for x in range(cf.test.num)], 2
):
assert (
abs(
testCOB.coexpression(a, b).score \
- testCOB._coex_concordance(a, b)) < 0.001
) or ( np.isnan(testCOB.coexpression(a, b).score)
and np.isnan(testCOB._coex_concordance(a, b))
)
def test_coex_distance_concordance(testCOB):
for a, b in itertools.combinations(
[testCOB.refgen.random_gene() for x in range(cf.test.num)], 2
):
dis_dif = abs(testCOB.coexpression(a, b).distance - abs(a-b))
assert np.isnan(dis_dif) or dis_dif < 100
def test_coex_id_concordance(testCOB):
for a, b in itertools.combinations(
[testCOB.refgen.random_gene() for x in range(cf.test.num)], 2
):
assert sorted(testCOB.coexpression(a,b).name) == sorted([a.id,b.id])
def test_coex_to_expr_concordance(testCOB):
'''
Translate expr indexes to coex indexes and back again
'''
# Get Random set of expr indexes
expr_len = testCOB._expr.shape[0]
expr_idxs = np.sort(np.unique(np.array(
[random.randint(0,expr_len-1) for i in range(cf.test.num*10)]
)))
# Translate them back and forth
coex_idxs = co.PCCUP.coex_index(expr_idxs, expr_len)
new_expr_idxs = co.PCCUP.coex_expr_index(coex_idxs, expr_len).flatten()
# Check all elements are the same in both
if not (expr_idxs.shape[0] == np.unique(new_expr_idxs).shape[0]):
assert False
res = (expr_idxs == np.unique(new_expr_idxs))
if not (np.sum(res) == res.shape[0]):
assert False
# Check all values occur the proper number of times
corr_count = len(expr_idxs)-1
bad_vals = []
for k,v in Counter(new_expr_idxs).items():
if not (v == corr_count):
bad_vals.append((k,v))
assert bad_vals == []
def test_num_neighbors_equals_degree(testCOB):
random_gene = testCOB.refgen.random_gene()
assert len(testCOB.neighbors(random_gene)) \
== testCOB.global_degree(random_gene)
assert len(testCOB.neighbors(random_gene,return_gene_set=True)) \
== testCOB.global_degree(random_gene)
assert len(testCOB.neighbors(random_gene,names_as_index=False)) \
== testCOB.global_degree(random_gene)
def test_subnetwork_contains_only_input_genes(testCOB):
random_genes = set(testCOB.refgen.random_genes(n=cf.test.num))
subnet = testCOB.subnetwork(random_genes,sig_only=False)
assert set(itertools.chain(*subnet.index.values)) == set([x.id for x in random_genes])
def test_subnetwork_contains_only_input_when_duplicates(testCOB):
random_genes = list(testCOB.refgen.random_genes(n=cf.test.num))
# Add duplicates
random_genes = random_genes + random_genes[-10:-1]
subnet = testCOB.subnetwork(random_genes,sig_only=False)
assert set(itertools.chain(*subnet.index.values)) == set([x.id for x in random_genes])
def test_degree_index_matches_degree(testCOB):
# Compare the degree determined from subnetwork aggregation
# is the same as what is in the data frame
for k,v in Counter(itertools.chain(*testCOB.subnetwork().index.values)).items():
assert testCOB.degree.ix[k].Degree == v
def test_empty_subnetwork_returns_proper_dataframe(testCOB):
subnet = testCOB.subnetwork([])
assert len(subnet) == 0
assert 'score' in subnet.columns
assert 'significant' in subnet.columns
assert 'distance' in subnet.columns
def test_zero_index_genes_doesnt_get_filtered(testCOB):
''' ---- Regression bug
This bug occured when one of the genes in the subnetwork list
had an index of 0, which was filtered out because the list filter
function thought it was None
'''
# get the first gene in the matrix
gene_a = testCOB.refgen[testCOB._expr.index[0]]
# get a random gene
gene_b = testCOB.refgen.random_gene()
# make sure that the subnetwork contains a single coexpression
# entry between genes
assert len(testCOB.subnetwork([gene_a,gene_b],sig_only=False)) == 1
def test_zero_degree_genes_return_empty_dataframe(testCOB):
# get a random zero degree gene
gene_id = testCOB.degree.ix[testCOB.degree.Degree==0].sample(1).index[0]
gene = testCOB.refgen[gene_id]
assert len(testCOB.neighbors(gene)) == 0
def test_repr(testCOB):
assert repr(testCOB).startswith('<COB:')
def test_str(testCOB):
assert str(testCOB).startswith('<COB:')
def test_qc_gene(testCOB):
assert isinstance(testCOB.qc_gene(),pd.DataFrame)
| mit |
nschloe/python4gmsh | test/occ/helpers.py | 6 | 2631 | import math
import numpy as np
def prune_nodes(points, cells):
# Only points/cells that actually used
uvertices, uidx = np.unique(cells, return_inverse=True)
cells = uidx.reshape(cells.shape)
points = points[uvertices]
return points, cells
def get_triangle_volumes(pts, cells):
# Works in any dimension; taken from voropy
local_idx = np.array([[1, 2], [2, 0], [0, 1]]).T
idx_hierarchy = cells.T[local_idx]
half_edge_coords = pts[idx_hierarchy[1]] - pts[idx_hierarchy[0]]
ei_dot_ej = np.einsum(
"ijk, ijk->ij", half_edge_coords[[1, 2, 0]], half_edge_coords[[2, 0, 1]]
)
vols = 0.5 * np.sqrt(
+ei_dot_ej[2] * ei_dot_ej[0]
+ ei_dot_ej[0] * ei_dot_ej[1]
+ ei_dot_ej[1] * ei_dot_ej[2]
)
return vols
def get_simplex_volumes(pts, cells):
"""Signed volume of a simplex in nD. Note that signing only makes sense for
n-simplices in R^n.
"""
n = pts.shape[1]
assert cells.shape[1] == n + 1
p = pts[cells]
p = np.concatenate([p, np.ones(list(p.shape[:2]) + [1])], axis=-1)
return np.abs(np.linalg.det(p) / math.factorial(n))
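# Hedged sketch: the unit tetrahedron should have volume 1/6.
def _example_unit_tet_volume():
    pts = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    cells = np.array([[0, 1, 2, 3]])
    return get_simplex_volumes(pts, cells)  # approximately [0.1667]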
def compute_volume(mesh):
if "tetra" in mesh.cells_dict:
vol = math.fsum(
get_simplex_volumes(*prune_nodes(mesh.points, mesh.cells_dict["tetra"]))
)
elif "triangle" in mesh.cells_dict or "quad" in mesh.cells_dict:
vol = 0.0
if "triangle" in mesh.cells_dict:
# triangles
vol += math.fsum(
get_triangle_volumes(
*prune_nodes(mesh.points, mesh.cells_dict["triangle"])
)
)
if "quad" in mesh.cells_dict:
# quad: treat as two triangles
quads = mesh.cells_dict["quad"].T
split_cells = np.column_stack(
[[quads[0], quads[1], quads[2]], [quads[0], quads[2], quads[3]]]
).T
vol += math.fsum(
get_triangle_volumes(*prune_nodes(mesh.points, split_cells))
)
else:
assert "line" in mesh.cells_dict
segs = np.diff(mesh.points[mesh.cells_dict["line"]], axis=1).squeeze()
vol = np.sum(np.sqrt(np.einsum("...j, ...j", segs, segs)))
return vol
def plot(filename, points, triangles):
from matplotlib import pyplot as plt
pts = points[:, :2]
for e in triangles:
for idx in [[0, 1], [1, 2], [2, 0]]:
X = pts[e[idx]]
plt.plot(X[:, 0], X[:, 1], "-k")
plt.gca().set_aspect("equal", "datalim")
plt.axis("off")
# plt.show()
plt.savefig(filename, transparent=True)
| bsd-3-clause |
kpegion/SubX | Python/NAO_index/point_based/createNAO_index_template.py | 1 | 2998 | """ Create NAO index.
The file is filled in by generate_NAO_index.ksh.
"""
import os
import xarray as xr
import pandas as pd
# Sections of code to run
forecast = 1 # 1, 0
mme_forecast = 0 # 1, 0
ERA_Interim = 0 # 1, 0
# Inputs
moPath = 'moddir'
ft = 'ftype'
mo = 'mod'
ins = 'inst'
va = 'var'
pl = plev
nyv = nlat.0
nxv = nlon.0
syv = slat.0
sxv = slon.0
subsampletime = subsampleS
starttime = 'startS'
endtime = 'endS'
obsPath = 'obsdir'+va+'/'+str(pl)+'/'
nysave = str(int(nyv))
nxsave = str(int(nxv))
sysave = str(int(syv))
sxsave = str(int(sxv))
anomDir = moPath+ft+'/'+mo+'/'+va+'/'+str(pl)+'/daily/anom/'
anomfname = 'daily_anomalies.y'
obsanomPath = obsPath+'daily/SubX/anom/'
obsanomfname = 'daily_anomalies.y'
NAOpath = moPath+ft+'/'+mo+'/'+va+'/'+str(pl)+'/daily/NAO/point_based/'
if not os.path.isdir(NAOpath):
os.makedirs(NAOpath)
obsNAOpath = obsPath+'daily/SubX/NAO/point_based/'
if not os.path.isdir(obsNAOpath):
os.makedirs(obsNAOpath)
NAOfname = 'np.y'+nysave+'.x'+nxsave+'.sp.y'+sysave+'.x'+sxsave+'.nc'
obsNAOfname = 'np.y'+nysave+'.x'+nxsave+'.sp.y'+sysave+'.x'+sxsave+\
'.SubX.'+mo+'.nc'
# Sub-sample time
if 0 == subsampletime:
_rd = xr.open_dataarray(url+ins+'/.'+mo+'/.'+ft+'/.'+va+'/dods')
starttime = pd.Timestamp(_rd.S.values[0]).strftime('%Y-%m-%d')
endtime = pd.Timestamp(_rd.S.values[-1]).strftime('%Y-%m-%d')
# Update file names
anomfname = starttime+'.'+endtime+'.'+anomfname
obsanomfname = starttime+'.'+endtime+'.'+obsanomfname
NAOfname = starttime+'.'+endtime+'.'+NAOfname
obsNAOfname = starttime+'.'+endtime+'.'+obsNAOfname
if forecast == 1:
# Read in north point
nda = xr.open_dataarray(anomDir+anomfname+nysave+'.x'+nxsave+'.nc')
# Read in south point
sda = xr.open_dataarray(anomDir+anomfname+sysave+'.x'+sxsave+'.nc')
# Loop over ensembles
for i, e in enumerate(nda.M):
esave = str(int(e))
_np = nda.sel(M=e)
_sp = sda.sel(M=e)
nao = (_sp - _np) / (_sp - _np).std(dim='S')
nao.attrs['long_name'] = 'NAO'
nao.to_netcdf(NAOpath+'e'+esave+'.'+NAOfname)
# Ensemble mean
_np = nda.mean(dim='M')
_sp = sda.mean(dim='M')
nao = (_sp - _np) / (_sp - _np).std(dim='S')
nao.attrs['long_name'] = 'NAO'
nao.to_netcdf(NAOpath+'emean.'+NAOfname)
if mme_forecast == 1:
nda = xr.open_dataarray(anomDir+anomfname+nysave+'.x'+nxsave+'.nc')
sda = xr.open_dataarray(anomDir+anomfname+sysave+'.x'+sxsave+'.nc')
nao = (sda - nda) / (sda - nda).std(dim='S')
nao.attrs['long_name'] = 'NAO'
nao.to_netcdf(NAOpath+'emean.'+NAOfname)
if ERA_Interim == 1:
nda = xr.open_dataarray(obsanomPath+obsanomfname+nysave+'.x'+nxsave+\
'.SubX.'+mo+'.nc')
sda = xr.open_dataarray(obsanomPath+obsanomfname+sysave+'.x'+sxsave+\
'.SubX.'+mo+'.nc')
nao = (sda - nda) / (sda - nda).std(dim='S')
nao.attrs['long_name'] = 'NAO'
nao.to_netcdf(obsNAOpath+obsNAOfname)
| mit |
stscieisenhamer/ginga | ginga/util/toolbox.py | 1 | 5038 | #
# toolbox.py -- Goodies for enhancing a Ginga viewer
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# STDLIB
import io
import os
import warnings
# THIRD-PARTY
from astropy.utils.data import get_pkg_data_contents
from astropy.utils.exceptions import AstropyUserWarning
class ModeIndicator(object):
"""
This class adds a mode status indicator to a viewer's lower right-hand
corner.
Usage:
Instantiate this class with a Ginga ImageView{Toolkit} object as the
sole constructor argument. Save a reference to the mode indicator
object somewhere so it doesn't get collected.
"""
def __init__(self, viewer):
self.viewer = viewer
# set to false to disable
self.visible = True
self.fontsize = 12
# for displaying modal keyboard state
self.mode_obj = None
bm = viewer.get_bindmap()
bm.add_callback('mode-set', self.mode_change_cb)
viewer.add_callback('configure', self._configure_cb)
def mode_change_cb(self, bindmap, mode, modetype):
# delete the old indicator
obj = self.mode_obj
self.mode_obj = None
#canvas = self.viewer.get_canvas()
canvas = self.viewer.private_canvas
if obj:
try:
canvas.delete_object(obj)
except:
pass
if not self.visible:
return True
# if not one of the standard modifiers, display the new one
if not mode in (None, 'ctrl', 'shift'):
Text = canvas.get_draw_class('text')
Rect = canvas.get_draw_class('rectangle')
Compound = canvas.get_draw_class('compoundobject')
if modetype == 'locked':
text = '%s [L]' % (mode)
elif modetype == 'softlock':
text = '%s [SL]' % (mode)
else:
text = mode
xsp, ysp = 4, 6
wd, ht = self.viewer.get_window_size()
wd, ht = int(wd), int(ht)
if self.viewer._originUpper:
x1, y1 = wd-self.fontsize*len(text), ht-self.fontsize
else:
# matplotlib case
x1, y1 = wd-self.fontsize*len(text), self.fontsize
o1 = Text(x1, y1, text,
fontsize=self.fontsize, color='yellow', coord='canvas')
wd, ht = self.viewer.renderer.get_dimensions(o1)
# yellow text on a black filled rectangle
if self.viewer._originUpper:
a1, b1, a2, b2 = x1-xsp, y1+ysp, x1+wd+xsp, y1-ht
else:
# matplotlib case
a1, b1, a2, b2 = x1-xsp, y1-ysp, x1+wd+2*xsp, y1+ht+ysp
o2 = Compound(Rect(a1, b1, a2, b2,
color='black', coord='canvas',
fill=True, fillcolor='black'),
o1)
self.mode_obj = o2
canvas.add(o2)
return True
def _configure_cb(self, view, width, height):
# redraw the mode indicator since the window has been resized
bm = view.get_bindmap()
mode, modetype = bm.current_mode()
self.mode_change_cb(bm, mode, modetype)
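# Hedged usage sketch following the class docstring above; 'viewer' stands in
# for an already constructed Ginga ImageView{Toolkit} object (hypothetical here).
def _attach_mode_indicator(viewer):
    indicator = ModeIndicator(viewer)
    # keep a reference so the indicator is not garbage collected
    viewer._mode_indicator = indicator
    return indicator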
def generate_cfg_example(config_name, cfgpath='examples/configs', **kwargs):
"""Generate config file documentation for a given config name.
If found, it will be a Python code block of the contents.
If not found, it will have a generic message that the config
is not available.
Parameters
----------
config_name : str
Config name that is attached to the configuration file.
This is the same as input for ``prefs.createCategory()``.
For example, ``'general'``, ``'channel_Image'``, or
``'plugin_Zoom'``.
cfgpath : str
Where it is within package data.
kwargs : dict
Optional keywords for :func:`~astropy.utils.data.get_pkg_data_contents`.
Returns
-------
docstr : str
Docstring to be inserted into documentation.
"""
cfgname = config_name + '.cfg'
try:
cfgdata = get_pkg_data_contents(
os.path.join(cfgpath, cfgname), **kwargs)
except Exception as e:
warnings.warn(str(e), AstropyUserWarning)
return ''
homepath = '~' # Symbol for HOME for doc only, not actual code
userfile = os.path.join(homepath, '.ginga', cfgname)
docstring = io.StringIO()
docstring.write("""It is customizable using ``{0}``, where ``{1}``
is your HOME directory:
.. code-block:: Python
""".format(userfile, homepath))
for line in cfgdata.split('\n'):
line = line.strip()
if len(line) == 0:
docstring.write('\n') # Prevent trailing spaces
else:
docstring.write(' {0}\n'.format(line))
return docstring.getvalue()
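# Usage sketch (the config name 'plugin_Zoom' comes from the docstring above):
def _example_cfg_doc():
    return generate_cfg_example('plugin_Zoom')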
#END
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/io/stata.py | 1 | 108362 | """
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
It has been extended and improved by Skipper Seabold from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
http://www.statsmodels.org/devel/
"""
from collections import OrderedDict
import datetime
from io import BytesIO
import os
import struct
import sys
import warnings
from dateutil.relativedelta import relativedelta
import numpy as np
from pandas._libs.lib import infer_dtype
from pandas._libs.writers import max_len_string_array
from pandas.util._decorators import Appender, deprecate_kwarg
from pandas.core.dtypes.common import (
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
)
from pandas import (
Categorical,
DatetimeIndex,
NaT,
Timestamp,
concat,
isna,
to_datetime,
to_timedelta,
)
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.io.common import BaseIterator, _stringify_path, get_filepath_or_buffer
_version_error = (
"Version of given Stata file is not 104, 105, 108, "
"111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
"115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)"
)
_statafile_processing_params1 = """\
convert_dates : boolean, defaults to True
Convert date variables to DataFrame time values.
convert_categoricals : boolean, defaults to True
Read value labels and convert columns to Categorical/Factor variables."""
_encoding_params = """\
encoding : string, None or encoding
Encoding used to parse the files. None defaults to latin-1."""
_statafile_processing_params2 = """\
index_col : string, optional, default: None
Column to set as index.
convert_missing : boolean, defaults to False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nan.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : boolean, defaults to True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64).
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns.
order_categoricals : boolean, defaults to True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines."""
_iterator_params = """\
iterator : boolean, default False
Return StataReader object."""
_read_stata_doc = """
Read Stata file into DataFrame.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: ``file://localhost/path/to/table.dta``.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handler (e.g. via builtin ``open`` function)
or ``StringIO``.
%s
%s
%s
%s
%s
Returns
-------
DataFrame or StataReader
See Also
--------
io.stata.StataReader : Low-level reader for Stata data files.
DataFrame.to_stata: Export Stata data files.
Examples
--------
Read a Stata dta file:
>>> df = pd.read_stata('filename.dta')
Read a Stata dta file in 10,000 line chunks:
>>> itr = pd.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
... do_something(chunk)
""" % (
_statafile_processing_params1,
_encoding_params,
_statafile_processing_params2,
_chunksize_params,
_iterator_params,
)
_data_method_doc = """\
Read observations from Stata file, converting them into a dataframe
.. deprecated::
This is a legacy method. Use `read` in new code.
Parameters
----------
%s
%s
Returns
-------
DataFrame
""" % (
_statafile_processing_params1,
_statafile_processing_params2,
)
_read_method_doc = """\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
%s
%s
Returns
-------
DataFrame
""" % (
_statafile_processing_params1,
_statafile_processing_params2,
)
_stata_reader_doc = """\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or object
    implementing a binary read() function.
.. versionadded:: 0.23.0 support for pathlib, py.path.
%s
%s
%s
%s
""" % (
_statafile_processing_params1,
_statafile_processing_params2,
_encoding_params,
_chunksize_params,
)
@Appender(_read_stata_doc)
@deprecate_kwarg(old_arg_name="encoding", new_arg_name=None)
@deprecate_kwarg(old_arg_name="index", new_arg_name="index_col")
def read_stata(
filepath_or_buffer,
convert_dates=True,
convert_categoricals=True,
encoding=None,
index_col=None,
convert_missing=False,
preserve_dtypes=True,
columns=None,
order_categoricals=True,
chunksize=None,
iterator=False,
):
reader = StataReader(
filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
index_col=index_col,
convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
chunksize=chunksize,
)
if iterator or chunksize:
data = reader
else:
try:
data = reader.read()
finally:
reader.close()
return data
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
def _stata_elapsed_date_to_datetime_vec(dates, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
    Returns
-------
converted : Series
The converted dates
Examples
--------
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
    half-yearly date - th
        half-years since 1960h1
    yearly date - ty
        years since 0000
If you don't have pandas with datetime support, then you can't do
milliseconds accurately.
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month):
"""
Convert year and month to datetimes, using pandas vectorized versions
when the date range falls within the range supported by pandas.
Otherwise it falls back to a slower but more robust method
using datetime.
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format="%Y%m")
else:
index = getattr(year, "index", None)
return Series(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)], index=index
)
def convert_year_days_safe(year, days):
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
"""
if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
return to_datetime(year, format="%Y") + to_timedelta(days, unit="d")
else:
index = getattr(year, "index", None)
value = [
datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
for y, d in zip(year, days)
]
return Series(value, index=index)
def convert_delta_safe(base, deltas, unit):
"""
Convert base dates and deltas to datetimes, using pandas vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in pandas.
"""
index = getattr(deltas, "index", None)
if unit == "d":
if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Series(values, index=index)
elif unit == "ms":
if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
values = [
base + relativedelta(microseconds=(int(d) * 1000)) for d in deltas
]
return Series(values, index=index)
else:
raise ValueError("format not understood")
base = to_datetime(base)
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
# TODO: If/when pandas supports more than datetime64[ns], this should be
# improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
has_bad_values = True
data_col = Series(dates)
data_col[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt.startswith(("%tc", "tc")): # Delta ms relative to base
base = stata_epoch
ms = dates
conv_dates = convert_delta_safe(base, ms, "ms")
elif fmt.startswith(("%tC", "tC")):
warnings.warn("Encountered %tC format. Leaving in Stata " "Internal Format.")
conv_dates = Series(dates, dtype=np.object)
if has_bad_values:
conv_dates[bad_locs] = NaT
return conv_dates
# Delta days relative to base
elif fmt.startswith(("%td", "td", "%d", "d")):
base = stata_epoch
days = dates
conv_dates = convert_delta_safe(base, days, "d")
# does not count leap days - 7 days is a week.
# 52nd week may have more than 7 days
elif fmt.startswith(("%tw", "tw")):
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
conv_dates = convert_year_days_safe(year, days)
elif fmt.startswith(("%tm", "tm")): # Delta months relative to base
year = stata_epoch.year + dates // 12
month = (dates % 12) + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%tq", "tq")): # Delta quarters relative to base
year = stata_epoch.year + dates // 4
month = (dates % 4) * 3 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%th", "th")): # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%ty", "ty")): # Years -- not delta
year = dates
month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, month)
else:
raise ValueError("Date fmt {fmt} not understood".format(fmt=fmt))
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
def _datetime_to_stata_elapsed_vec(dates, fmt):
"""
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
Series or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
"""
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if is_datetime64_dtype(dates.values):
if delta:
delta = dates - stata_epoch
d["delta"] = delta.values.astype(np.int64) // 1000 # microseconds
if days or year:
dates = DatetimeIndex(dates)
d["year"], d["month"] = dates.year, dates.month
if days:
days = dates.astype(np.int64) - to_datetime(
d["year"], format="%Y"
).astype(np.int64)
d["days"] = days // NS_PER_DAY
elif infer_dtype(dates, skipna=False) == "datetime":
if delta:
delta = dates.values - stata_epoch
f = lambda x: US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d["delta"] = v(delta)
if year:
year_month = dates.apply(lambda x: 100 * x.year + x.month)
d["year"] = year_month.values // 100
d["month"] = year_month.values - d["year"] * 100
if days:
f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(f)
d["days"] = v(dates)
else:
raise ValueError(
"Columns containing dates must contain either "
"datetime64, datetime.datetime or null values."
)
return DataFrame(d, index=index)
bad_loc = isna(dates)
index = dates.index
if bad_loc.any():
dates = Series(dates)
if is_datetime64_dtype(dates):
dates[bad_loc] = to_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
warnings.warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = 52 * (d.year - stata_epoch.year) + d.days // 7
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 12 * (d.year - stata_epoch.year) + d.month - 1
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 2 * (d.year - stata_epoch.year) + (d.month > 6).astype(np.int)
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError(
"Format {fmt} is not a known Stata date format".format(fmt=fmt)
)
conv_dates = Series(conv_dates, dtype=np.float64)
missing_value = struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
conv_dates[bad_loc] = missing_value
return Series(conv_dates, index=index)
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer)
characters. Column '%s' does not satisfy this restriction. Use the
'version=117' parameter to write the newer (Stata 13 and later) format.
"""
class PossiblePrecisionLoss(Warning):
pass
precision_loss_doc = """
Column converted from %s to %s, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""
class ValueLabelTypeMismatch(Warning):
pass
value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""
class InvalidColumnName(Warning):
pass
invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
{0}
If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only
alphanumerics and underscores, no Stata reserved words)
"""
def _cast_to_stata_types(data):
"""Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and upcast if needed.
    int64 data is not usable in Stata, and so it is downcast to int32 whenever
    the values are in the int32 range, and cast to float64 when larger than
this range. If the int64 values are outside of the range of those
perfectly representable as float64 values, a warning is raised.
bool columns are cast to int8. uint columns are converted to int of the
same size if there is no loss in precision, otherwise are upcast to a
    larger type. uint64 is currently not supported since it is converted to
object in a DataFrame.
"""
ws = ""
# original, if small, if large
conversion_data = (
(np.bool, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64),
)
float32_max = struct.unpack("<f", b"\xff\xff\xff\x7e")[0]
float64_max = struct.unpack("<d", b"\xff\xff\xff\xff\xff\xff\xdf\x7f")[0]
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.float64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc % ("uint64", "float64")
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if data[col].max() <= 2147483620 and data[col].min() >= -2147483647:
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53:
ws = precision_loss_doc % ("int64", "float64")
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
raise ValueError(
"Column {col} has a maximum value of "
"infinity which is outside the range "
"supported by Stata.".format(col=col)
)
if dtype == np.float32 and value > float32_max:
data[col] = data[col].astype(np.float64)
elif dtype == np.float64:
if value > float64_max:
raise ValueError(
"Column {col} has a maximum value "
"({val}) outside the range supported by "
"Stata ({float64_max})".format(
col=col, val=value, float64_max=float64_max
)
)
if ws:
warnings.warn(ws, PossiblePrecisionLoss)
return data
class StataValueLabel:
"""
Parse a categorical column and prepare formatted output
Parameters
----------
    catarray : Series
        Categorical Series whose categories and codes define the value labels
Methods
-------
generate_value_label
"""
def __init__(self, catarray):
self.labname = catarray.name
categories = catarray.cat.categories
self.value_labels = list(zip(np.arange(len(categories)), categories))
self.value_labels.sort(key=lambda x: x[0])
self.text_len = np.int32(0)
self.off = []
self.val = []
self.txt = []
self.n = 0
# Compute lengths and setup lists of offsets and labels
for vl in self.value_labels:
category = vl[1]
if not isinstance(category, str):
category = str(category)
warnings.warn(
value_label_mismatch_doc.format(catarray.name),
ValueLabelTypeMismatch,
)
self.off.append(self.text_len)
self.text_len += len(category) + 1 # +1 for the padding
self.val.append(vl[0])
self.txt.append(category)
self.n += 1
if self.text_len > 32000:
raise ValueError(
"Stata value labels for a single variable must "
"have a combined length less than 32,000 "
"characters."
)
# Ensure int32
self.off = np.array(self.off, dtype=np.int32)
self.val = np.array(self.val, dtype=np.int32)
# Total length
self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len
def _encode(self, s):
"""
Python 3 compatibility shim
"""
return s.encode(self._encoding)
def generate_value_label(self, byteorder, encoding):
"""
Parameters
----------
byteorder : str
Byte order of the output
encoding : str
File encoding
Returns
-------
value_label : bytes
Bytes containing the formatted value label
"""
self._encoding = encoding
bio = BytesIO()
null_string = "\x00"
null_byte = b"\x00"
# len
bio.write(struct.pack(byteorder + "i", self.len))
# labname
labname = self._encode(_pad_bytes(self.labname[:32], 33))
bio.write(labname)
# padding - 3 bytes
for i in range(3):
bio.write(struct.pack("c", null_byte))
# value_label_table
# n - int32
bio.write(struct.pack(byteorder + "i", self.n))
# textlen - int32
bio.write(struct.pack(byteorder + "i", self.text_len))
# off - int32 array (n elements)
for offset in self.off:
bio.write(struct.pack(byteorder + "i", offset))
# val - int32 array (n elements)
for value in self.val:
bio.write(struct.pack(byteorder + "i", value))
# txt - Text labels, null terminated
for text in self.txt:
bio.write(self._encode(text + null_string))
bio.seek(0)
return bio.read()
class StataMissingValue:
"""
An observation's missing value.
Parameters
----------
value : int8, int16, int32, float32 or float64
The Stata missing value code
Attributes
----------
string : string
String representation of the Stata missing value
value : int8, int16, int32, float32 or float64
The original encoded missing value
Notes
-----
More information: <http://www.stata.com/help.cgi?missing>
    Integer missing values map the codes '.', '.a', ..., '.z' to the ranges
101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
2147483647 (for int32). Missing values for floating point data types are
more complex but the pattern is simple to discern from the following table.
np.float32 missing values (float in Stata)
0000007f .
0008007f .a
0010007f .b
...
00c0007f .x
00c8007f .y
00d0007f .z
np.float64 missing values (double in Stata)
000000000000e07f .
000000000001e07f .a
000000000002e07f .b
...
000000000018e07f .x
000000000019e07f .y
00000000001ae07f .z
"""
# Construct a dictionary of missing values
MISSING_VALUES = {}
bases = (101, 32741, 2147483621)
for b in bases:
# Conversion to long to avoid hash issues on 32 bit platforms #8968
MISSING_VALUES[b] = "."
for i in range(1, 27):
MISSING_VALUES[i + b] = "." + chr(96 + i)
float32_base = b"\x00\x00\x00\x7f"
increment = struct.unpack("<i", b"\x00\x08\x00\x00")[0]
for i in range(27):
value = struct.unpack("<f", float32_base)[0]
MISSING_VALUES[value] = "."
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack("<i", struct.pack("<f", value))[0] + increment
float32_base = struct.pack("<i", int_value)
float64_base = b"\x00\x00\x00\x00\x00\x00\xe0\x7f"
increment = struct.unpack("q", b"\x00\x00\x00\x00\x00\x01\x00\x00")[0]
for i in range(27):
value = struct.unpack("<d", float64_base)[0]
MISSING_VALUES[value] = "."
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack("q", struct.pack("<d", value))[0] + increment
float64_base = struct.pack("q", int_value)
BASE_MISSING_VALUES = {
"int8": 101,
"int16": 32741,
"int32": 2147483621,
"float32": struct.unpack("<f", float32_base)[0],
"float64": struct.unpack("<d", float64_base)[0],
}
def __init__(self, value):
self._value = value
# Conversion to int to avoid hash issues on 32 bit platforms #8968
value = int(value) if value < 2147483648 else float(value)
self._str = self.MISSING_VALUES[value]
string = property(
lambda self: self._str,
doc="The Stata representation of the missing value: " "'.', '.a'..'.z'",
)
value = property(
lambda self: self._value, doc="The binary representation of the missing value."
)
def __str__(self):
return self.string
def __repr__(self):
# not perfect :-/
return "{cls}({obj})".format(cls=self.__class__, obj=self)
def __eq__(self, other):
return (
isinstance(other, self.__class__)
and self.string == other.string
and self.value == other.value
)
@classmethod
def get_base_missing_value(cls, dtype):
if dtype == np.int8:
value = cls.BASE_MISSING_VALUES["int8"]
elif dtype == np.int16:
value = cls.BASE_MISSING_VALUES["int16"]
elif dtype == np.int32:
value = cls.BASE_MISSING_VALUES["int32"]
elif dtype == np.float32:
value = cls.BASE_MISSING_VALUES["float32"]
elif dtype == np.float64:
value = cls.BASE_MISSING_VALUES["float64"]
else:
raise ValueError("Unsupported dtype")
return value
class StataParser:
def __init__(self):
# type code.
# --------------------
# str1 1 = 0x01
# str2 2 = 0x02
# ...
# str244 244 = 0xf4
# byte 251 = 0xfb (sic)
# int 252 = 0xfc
# long 253 = 0xfd
# float 254 = 0xfe
# double 255 = 0xff
# --------------------
# NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
self.DTYPE_MAP = dict(
list(zip(range(1, 245), ["a" + str(i) for i in range(1, 245)]))
+ [
(251, np.int8),
(252, np.int16),
(253, np.int32),
(254, np.float32),
(255, np.float64),
]
)
self.DTYPE_MAP_XML = dict(
[
(32768, np.uint8), # Keys to GSO
(65526, np.float64),
(65527, np.float32),
(65528, np.int32),
(65529, np.int16),
(65530, np.int8),
]
)
self.TYPE_MAP = list(range(251)) + list("bhlfd")
self.TYPE_MAP_XML = dict(
[
# Not really a Q, unclear how to handle byteswap
(32768, "Q"),
(65526, "d"),
(65527, "f"),
(65528, "l"),
(65529, "h"),
(65530, "b"),
]
)
# NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
float32_min = b"\xff\xff\xff\xfe"
float32_max = b"\xff\xff\xff\x7e"
float64_min = b"\xff\xff\xff\xff\xff\xff\xef\xff"
float64_max = b"\xff\xff\xff\xff\xff\xff\xdf\x7f"
self.VALID_RANGE = {
"b": (-127, 100),
"h": (-32767, 32740),
"l": (-2147483647, 2147483620),
"f": (
np.float32(struct.unpack("<f", float32_min)[0]),
np.float32(struct.unpack("<f", float32_max)[0]),
),
"d": (
np.float64(struct.unpack("<d", float64_min)[0]),
np.float64(struct.unpack("<d", float64_max)[0]),
),
}
self.OLD_TYPE_MAPPING = {
98: 251, # byte
105: 252, # int
108: 253, # long
102: 254 # float
# don't know old code for double
}
# These missing values are the generic '.' in Stata, and are used
# to replace nans
self.MISSING_VALUES = {
"b": 101,
"h": 32741,
"l": 2147483621,
"f": np.float32(struct.unpack("<f", b"\x00\x00\x00\x7f")[0]),
"d": np.float64(
struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
),
}
self.NUMPY_TYPE_MAP = {
"b": "i1",
"h": "i2",
"l": "i4",
"f": "f4",
"d": "f8",
"Q": "u8",
}
# Reserved words cannot be used as variable names
self.RESERVED_WORDS = (
"aggregate",
"array",
"boolean",
"break",
"byte",
"case",
"catch",
"class",
"colvector",
"complex",
"const",
"continue",
"default",
"delegate",
"delete",
"do",
"double",
"else",
"eltypedef",
"end",
"enum",
"explicit",
"export",
"external",
"float",
"for",
"friend",
"function",
"global",
"goto",
"if",
"inline",
"int",
"local",
"long",
"NULL",
"pragma",
"protected",
"quad",
"rowvector",
"short",
"typedef",
"typename",
"virtual",
)
class StataReader(StataParser, BaseIterator):
__doc__ = _stata_reader_doc
@deprecate_kwarg(old_arg_name="encoding", new_arg_name=None)
@deprecate_kwarg(old_arg_name="index", new_arg_name="index_col")
def __init__(
self,
path_or_buf,
convert_dates=True,
convert_categoricals=True,
index_col=None,
convert_missing=False,
preserve_dtypes=True,
columns=None,
order_categoricals=True,
encoding=None,
chunksize=None,
):
super().__init__()
self.col_sizes = ()
# Arguments to the reader (can be temporarily overridden in
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
self._index_col = index_col
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
self._encoding = None
self._chunksize = chunksize
# State variables for the file
self._has_string_data = False
self._missing_values = False
self._can_read_value_labels = False
self._column_selector_set = False
self._value_labels_read = False
self._data_read = False
self._dtype = None
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, str):
path_or_buf, encoding, _, should_close = get_filepath_or_buffer(path_or_buf)
if isinstance(path_or_buf, (str, bytes)):
self.path_or_buf = open(path_or_buf, "rb")
else:
# Copy to BytesIO, and ensure no encoding
contents = path_or_buf.read()
self.path_or_buf = BytesIO(contents)
self._read_header()
self._setup_dtype()
def __enter__(self):
""" enter context manager """
return self
def __exit__(self, exc_type, exc_value, traceback):
""" exit context manager """
self.close()
def close(self):
""" close the handle if its open """
try:
self.path_or_buf.close()
except IOError:
pass
def _set_encoding(self):
"""
Set string encoding which depends on file version
"""
if self.format_version < 118:
self._encoding = "latin-1"
else:
self._encoding = "utf-8"
def _read_header(self):
first_char = self.path_or_buf.read(1)
if struct.unpack("c", first_char)[0] == b"<":
self._read_new_header(first_char)
else:
self._read_old_header(first_char)
self.has_string_data = len([x for x in self.typlist if type(x) is int]) > 0
# calculate size of a data record
self.col_sizes = [self._calcsize(typ) for typ in self.typlist]
def _read_new_header(self, first_char):
# The first part of the header is common to 117 and 118.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117, 118]:
raise ValueError(_version_error)
self._set_encoding()
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == b"MSF" and ">" or "<"
self.path_or_buf.read(15) # </byteorder><K>
self.nvar = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = self._get_nobs()
self.path_or_buf.read(11) # </N><label>
self._data_label = self._get_data_label()
self.path_or_buf.read(19) # </label><timestamp>
self.time_stamp = self._get_time_stamp()
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
self._seek_vartypes = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 16
)
self._seek_varnames = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 10
)
self._seek_sortlist = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 10
)
self._seek_formats = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 9
)
self._seek_value_label_names = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 19
)
# Requires version-specific treatment
self._seek_variable_labels = self._get_seek_variable_labels()
self.path_or_buf.read(8) # <characteristics>
self.data_location = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 6
)
self.seek_strls = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 7
)
self.seek_value_labels = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 14
)
self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)
self.path_or_buf.seek(self._seek_varnames)
self.varlist = self._get_varlist()
self.path_or_buf.seek(self._seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ("h" * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1)),
)[:-1]
self.path_or_buf.seek(self._seek_formats)
self.fmtlist = self._get_fmtlist()
self.path_or_buf.seek(self._seek_value_label_names)
self.lbllist = self._get_lbllist()
self.path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-118.
def _get_dtypes(self, seek_vartypes):
self.path_or_buf.seek(seek_vartypes)
raw_typlist = [
struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
for i in range(self.nvar)
]
def f(typ):
if typ <= 2045:
return typ
try:
return self.TYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata types [{0}]".format(typ))
typlist = [f(x) for x in raw_typlist]
def f(typ):
if typ <= 2045:
return str(typ)
try:
return self.DTYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata dtype [{0}]".format(typ))
dtyplist = [f(x) for x in raw_typlist]
return typlist, dtyplist
def _get_varlist(self):
if self.format_version == 117:
b = 33
elif self.format_version == 118:
b = 129
return [self._decode(self.path_or_buf.read(b)) for i in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self):
if self.format_version == 118:
b = 57
elif self.format_version > 113:
b = 49
elif self.format_version > 104:
b = 12
else:
b = 7
return [self._decode(self.path_or_buf.read(b)) for i in range(self.nvar)]
# Returns the label list
def _get_lbllist(self):
if self.format_version >= 118:
b = 129
elif self.format_version > 108:
b = 33
else:
b = 9
return [self._decode(self.path_or_buf.read(b)) for i in range(self.nvar)]
def _get_variable_labels(self):
if self.format_version == 118:
vlblist = [
self._decode(self.path_or_buf.read(321)) for i in range(self.nvar)
]
elif self.format_version > 105:
vlblist = [
self._decode(self.path_or_buf.read(81)) for i in range(self.nvar)
]
else:
vlblist = [
self._decode(self.path_or_buf.read(32)) for i in range(self.nvar)
]
return vlblist
def _get_nobs(self):
if self.format_version == 118:
return struct.unpack(self.byteorder + "Q", self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
def _get_data_label(self):
if self.format_version == 118:
strlen = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version == 117:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version > 105:
return self._decode(self.path_or_buf.read(81))
else:
return self._decode(self.path_or_buf.read(32))
def _get_time_stamp(self):
if self.format_version == 118:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
elif self.format_version == 117:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version > 104:
return self._decode(self.path_or_buf.read(18))
else:
raise ValueError()
def _get_seek_variable_labels(self):
if self.format_version == 117:
            self.path_or_buf.read(8)  # <variable_labels>, throw away
# Stata 117 data files do not follow the described format. This is
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
elif self.format_version == 118:
return struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 17
else:
raise ValueError()
def _read_old_header(self, first_char):
self.format_version = struct.unpack("b", first_char)[0]
if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
raise ValueError(_version_error)
self._set_encoding()
self.byteorder = (
struct.unpack("b", self.path_or_buf.read(1))[0] == 0x1 and ">" or "<"
)
self.filetype = struct.unpack("b", self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
self.nobs = self._get_nobs()
self._data_label = self._get_data_label()
self.time_stamp = self._get_time_stamp()
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1)) for i in range(self.nvar)]
else:
buf = self.path_or_buf.read(self.nvar)
typlistb = np.frombuffer(buf, dtype=np.uint8)
typlist = []
for tp in typlistb:
if tp in self.OLD_TYPE_MAPPING:
typlist.append(self.OLD_TYPE_MAPPING[tp])
else:
typlist.append(tp - 127) # bytes
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except ValueError:
raise ValueError(
"cannot convert stata types [{0}]".format(
",".join(str(x) for x in typlist)
)
)
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except ValueError:
raise ValueError(
"cannot convert stata dtypes [{0}]".format(
",".join(str(x) for x in typlist)
)
)
if self.format_version > 108:
self.varlist = [
self._decode(self.path_or_buf.read(33)) for i in range(self.nvar)
]
else:
self.varlist = [
self._decode(self.path_or_buf.read(9)) for i in range(self.nvar)
]
self.srtlist = struct.unpack(
self.byteorder + ("h" * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1)),
)[:-1]
self.fmtlist = self._get_fmtlist()
self.lbllist = self._get_lbllist()
self._variable_labels = self._get_variable_labels()
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
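        # Sketch for a little-endian file with format > 108: a field header of
        # b"\x01\x05\x00\x00\x00" means data_type = 1 and data_len = 5, so the
        # next 5 bytes are skipped; a header with data_type == 0 ends the loop.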
if self.format_version > 104:
while True:
data_type = struct.unpack(
self.byteorder + "b", self.path_or_buf.read(1)
)[0]
if self.format_version > 108:
data_len = struct.unpack(
self.byteorder + "i", self.path_or_buf.read(4)
)[0]
else:
data_len = struct.unpack(
self.byteorder + "h", self.path_or_buf.read(2)
)[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
def _setup_dtype(self):
"""Map between numpy and state dtypes"""
if self._dtype is not None:
return self._dtype
dtype = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
dtype.append(("s" + str(i), self.byteorder + self.NUMPY_TYPE_MAP[typ]))
else:
dtype.append(("s" + str(i), "S" + str(typ)))
dtype = np.dtype(dtype)
self._dtype = dtype
return self._dtype
def _calcsize(self, fmt):
return type(fmt) is int and fmt or struct.calcsize(self.byteorder + fmt)
def _decode(self, s):
# have bytes not strings, so must decode
s = s.partition(b"\0")[0]
try:
return s.decode(self._encoding)
except UnicodeDecodeError:
# GH 25960, fallback to handle incorrect format produced when 117
# files are converted to 118 files in Stata
msg = """
One or more strings in the dta file could not be decoded using {encoding}, and
so the fallback encoding of latin-1 is being used. This can happen when a file
has been incorrectly encoded by Stata or some other software. You should verify
the string values returned are correct."""
warnings.warn(msg.format(encoding=self._encoding), UnicodeWarning)
return s.decode("latin-1")
def _read_value_labels(self):
if self._value_labels_read:
# Don't read twice
return
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
self._value_labels_read = True
self.value_label_dict = dict()
return
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
else:
offset = self.nobs * self._dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
self._value_labels_read = True
self.value_label_dict = dict()
while True:
if self.format_version >= 117:
if self.path_or_buf.read(5) == b"</val": # <lbl>
break # end of value label table
slength = self.path_or_buf.read(4)
if not slength:
break # end of value label table (format < 117)
if self.format_version <= 117:
labname = self._decode(self.path_or_buf.read(33))
else:
labname = self._decode(self.path_or_buf.read(129))
self.path_or_buf.read(3) # padding
n = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
txtlen = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
off = np.frombuffer(
self.path_or_buf.read(4 * n), dtype=self.byteorder + "i4", count=n
)
val = np.frombuffer(
self.path_or_buf.read(4 * n), dtype=self.byteorder + "i4", count=n
)
ii = np.argsort(off)
off = off[ii]
val = val[ii]
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = dict()
for i in range(n):
end = off[i + 1] if i < n - 1 else txtlen
self.value_label_dict[labname][val[i]] = self._decode(txt[off[i] : end])
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
def _read_strls(self):
self.path_or_buf.seek(self.seek_strls)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO = {"0": ""}
while True:
if self.path_or_buf.read(3) != b"GSO":
break
if self.format_version == 117:
v_o = struct.unpack(self.byteorder + "Q", self.path_or_buf.read(8))[0]
else:
buf = self.path_or_buf.read(12)
# Only tested on little endian file on little endian machine.
if self.byteorder == "<":
buf = buf[0:2] + buf[4:10]
else:
buf = buf[0:2] + buf[6:]
v_o = struct.unpack("Q", buf)[0]
typ = struct.unpack("B", self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
va = va[0:-1].decode(self._encoding)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO[str(v_o)] = va
# legacy
@Appender(_data_method_doc)
def data(self, **kwargs):
warnings.warn("'data' is deprecated, use 'read' instead")
if self._data_read:
raise Exception("Data has already been read.")
self._data_read = True
return self.read(None, **kwargs)
def __next__(self):
return self.read(nrows=self._chunksize or 1)
def get_chunk(self, size=None):
"""
Reads lines from Stata file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
@Appender(_read_method_doc)
@deprecate_kwarg(old_arg_name="index", new_arg_name="index_col")
def read(
self,
nrows=None,
convert_dates=None,
convert_categoricals=None,
index_col=None,
convert_missing=None,
preserve_dtypes=None,
columns=None,
order_categoricals=None,
):
# Handle empty file or chunk. If reading incrementally raise
# StopIteration. If reading the whole thing return an empty
# data frame.
if (self.nobs == 0) and (nrows is None):
self._can_read_value_labels = True
self._data_read = True
self.close()
return DataFrame(columns=self.varlist)
# Handle options
if convert_dates is None:
convert_dates = self._convert_dates
if convert_categoricals is None:
convert_categoricals = self._convert_categoricals
if convert_missing is None:
convert_missing = self._convert_missing
if preserve_dtypes is None:
preserve_dtypes = self._preserve_dtypes
if columns is None:
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
if index_col is None:
index_col = self._index_col
if nrows is None:
nrows = self.nobs
if (self.format_version >= 117) and (not self._value_labels_read):
self._can_read_value_labels = True
self._read_strls()
# Read data
dtype = self._dtype
max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
read_len = nrows * dtype.itemsize
read_len = min(read_len, max_read_len)
if read_len <= 0:
# Iterator has finished, should never be here unless
# we are reading the file incrementally
if convert_categoricals:
self._read_value_labels()
self.close()
raise StopIteration
offset = self._lines_read * dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
read_lines = min(nrows, self.nobs - self._lines_read)
data = np.frombuffer(
self.path_or_buf.read(read_len), dtype=dtype, count=read_lines
)
self._lines_read += read_lines
if self._lines_read == self.nobs:
self._can_read_value_labels = True
self._data_read = True
# if necessary, swap the byte order to native here
if self.byteorder != self._native_byteorder:
data = data.byteswap().newbyteorder()
if convert_categoricals:
self._read_value_labels()
if len(data) == 0:
data = DataFrame(columns=self.varlist)
else:
data = DataFrame.from_records(data)
data.columns = self.varlist
# If index is not specified, use actual row number rather than
# restarting at 0 for each chunk.
if index_col is None:
ix = np.arange(self._lines_read - read_lines, self._lines_read)
data = data.set_index(ix)
if columns is not None:
try:
data = self._do_select_columns(data, columns)
except ValueError:
self.close()
raise
# Decode strings
for col, typ in zip(data, self.typlist):
if type(typ) is int:
data[col] = data[col].apply(self._decode, convert_dtype=True)
data = self._insert_strls(data)
cols_ = np.where(self.dtyplist)[0]
# Convert columns (if needed) to match input type
ix = data.index
requires_type_conversion = False
data_formatted = []
for i in cols_:
if self.dtyplist[i] is not None:
col = data.columns[i]
dtype = data[col].dtype
if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
requires_type_conversion = True
data_formatted.append(
(col, Series(data[col], ix, self.dtyplist[i]))
)
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
data = DataFrame.from_dict(OrderedDict(data_formatted))
del data_formatted
data = self._do_convert_missing(data, convert_missing)
if convert_dates:
def any_startswith(x: str) -> bool:
return any(x.startswith(fmt) for fmt in _date_formats)
cols = np.where([any_startswith(x) for x in self.fmtlist])[0]
for i in cols:
col = data.columns[i]
try:
data[col] = _stata_elapsed_date_to_datetime_vec(
data[col], self.fmtlist[i]
)
except ValueError:
self.close()
raise
if convert_categoricals and self.format_version > 108:
data = self._do_convert_categoricals(
data, self.value_label_dict, self.lbllist, order_categoricals
)
if not preserve_dtypes:
retyped_data = []
convert = False
for col in data:
dtype = data[col].dtype
if dtype in (np.float16, np.float32):
dtype = np.float64
convert = True
elif dtype in (np.int8, np.int16, np.int32):
dtype = np.int64
convert = True
retyped_data.append((col, data[col].astype(dtype)))
if convert:
data = DataFrame.from_dict(OrderedDict(retyped_data))
if index_col is not None:
data = data.set_index(data.pop(index_col))
return data
def _do_convert_missing(self, data, convert_missing):
# Check for missing values, and replace if found
replacements = {}
for i, colname in enumerate(data):
fmt = self.typlist[i]
if fmt not in self.VALID_RANGE:
continue
nmin, nmax = self.VALID_RANGE[fmt]
series = data[colname]
missing = np.logical_or(series < nmin, series > nmax)
if not missing.any():
continue
if convert_missing: # Replacement follows Stata notation
missing_loc = np.argwhere(missing._ndarray_values)
umissing, umissing_loc = np.unique(series[missing], return_inverse=True)
replacement = Series(series, dtype=np.object)
for j, um in enumerate(umissing):
missing_value = StataMissingValue(um)
loc = missing_loc[umissing_loc == j]
replacement.iloc[loc] = missing_value
else: # All replacements are identical
dtype = series.dtype
if dtype not in (np.float32, np.float64):
dtype = np.float64
replacement = Series(series, dtype=dtype)
replacement[missing] = np.nan
replacements[colname] = replacement
if replacements:
columns = data.columns
replacements = DataFrame(replacements)
data = concat([data.drop(replacements.columns, 1), replacements], 1)
data = data[columns]
return data
def _insert_strls(self, data):
if not hasattr(self, "GSO") or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
if typ != "Q":
continue
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
data.iloc[:, i] = [self.GSO[str(k)] for k in data.iloc[:, i]]
return data
def _do_select_columns(self, data, columns):
if not self._column_selector_set:
column_set = set(columns)
if len(column_set) != len(columns):
raise ValueError("columns contains duplicate entries")
unmatched = column_set.difference(data.columns)
if unmatched:
raise ValueError(
"The following columns were not found in the "
"Stata data set: " + ", ".join(list(unmatched))
)
# Copy information for retained columns for later processing
dtyplist = []
typlist = []
fmtlist = []
lbllist = []
for col in columns:
i = data.columns.get_loc(col)
dtyplist.append(self.dtyplist[i])
typlist.append(self.typlist[i])
fmtlist.append(self.fmtlist[i])
lbllist.append(self.lbllist[i])
self.dtyplist = dtyplist
self.typlist = typlist
self.fmtlist = fmtlist
self.lbllist = lbllist
self._column_selector_set = True
return data[columns]
def _do_convert_categoricals(
self, data, value_label_dict, lbllist, order_categoricals
):
"""
Converts categorical columns to Categorical type.
"""
value_labels = list(value_label_dict.keys())
cat_converted_data = []
for col, label in zip(data, lbllist):
if label in value_labels:
# Explicit call with ordered=True
cat_data = Categorical(data[col], ordered=order_categoricals)
categories = []
for category in cat_data.categories:
if category in value_label_dict[label]:
categories.append(value_label_dict[label][category])
else:
categories.append(category) # Partially labeled
try:
cat_data.categories = categories
except ValueError:
vc = Series(categories).value_counts()
repeats = list(vc.index[vc > 1])
repeats = "-" * 80 + "\n" + "\n".join(repeats)
# GH 25772
msg = """
Value labels for column {col} are not unique. These cannot be converted to
pandas categoricals.
Either read the file with `convert_categoricals` set to False or use the
low level interface in `StataReader` to separately read the values and the
value_labels.
The repeated labels are:
{repeats}
"""
raise ValueError(msg.format(col=col, repeats=repeats))
# TODO: is the next line needed above in the data(...) method?
cat_data = Series(cat_data, index=data.index)
cat_converted_data.append((col, cat_data))
else:
cat_converted_data.append((col, data[col]))
data = DataFrame.from_dict(OrderedDict(cat_converted_data))
return data
@property
def data_label(self):
"""
Return data label of Stata file.
"""
return self._data_label
def variable_labels(self):
"""
Return variable labels as a dict, associating each variable name
        with its corresponding label.
Returns
-------
dict
"""
return dict(zip(self.varlist, self._variable_labels))
def value_labels(self):
"""
        Return a dict associating each variable name with a dict that maps
        each value to its corresponding label.
Returns
-------
dict
"""
if not self._value_labels_read:
self._read_value_labels()
return self.value_label_dict
def _open_file_binary_write(fname):
"""
Open a binary file or no-op if file-like.
Parameters
----------
fname : string path, path object or buffer
Returns
-------
file : file-like object
File object supporting write
own : bool
True if the file was created, otherwise False
"""
if hasattr(fname, "write"):
# if 'b' not in fname.mode:
return fname, False
return open(fname, "wb"), True
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError("Endianness {endian} not understood".format(endian=endianness))
def _pad_bytes(name, length):
"""
    Take a string and pad it with null bytes until it is `length` characters long.
"""
return name + "\x00" * (length - len(name))
def _convert_datetime_to_stata_type(fmt):
"""
Convert from one of the stata date formats to a type in TYPE_MAP.
"""
if fmt in [
"tc",
"%tc",
"td",
"%td",
"tw",
"%tw",
"tm",
"%tm",
"tq",
"%tq",
"th",
"%th",
"ty",
"%ty",
]:
return np.float64 # Stata expects doubles for SIFs
else:
raise NotImplementedError("Format {fmt} not implemented".format(fmt=fmt))
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convert_dates key must be a " "column or an integer")
new_dict.update({key: convert_dates[key]})
return new_dict
def _dtype_to_stata_type(dtype, column):
"""
Convert dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
Pandas Stata
251 - for int8 byte
252 - for int16 int
253 - for int32 long
254 - for float32 float
255 - for double double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(ensure_object(column.values))
return max(itemsize, 1)
elif dtype == np.float64:
return 255
elif dtype == np.float32:
return 254
elif dtype == np.int32:
return 253
elif dtype == np.int16:
return 252
elif dtype == np.int8:
return 251
else: # pragma : no cover
raise NotImplementedError(
"Data type {dtype} not supported.".format(dtype=dtype)
)
def _dtype_to_default_stata_fmt(dtype, column, dta_version=114, force_strl=False):
"""
Map numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
strl -> "%9s"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dta_version < 117:
max_str_len = 244
else:
max_str_len = 2045
if force_strl:
return "%9s"
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column, skipna=True)
if not (inferred_dtype in ("string", "unicode") or len(column) == 0):
raise ValueError(
"Column `{col}` cannot be exported.\n\nOnly "
"string-like object arrays containing all "
"strings or a mix of strings and None can be "
"exported. Object arrays containing only null "
"values are prohibited. Other object types"
"cannot be exported and must first be converted "
"to one of the supported "
"types.".format(col=column.name)
)
itemsize = max_len_string_array(ensure_object(column.values))
if itemsize > max_str_len:
if dta_version >= 117:
return "%9s"
else:
raise ValueError(excessive_string_length_error % column.name)
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError(
"Data type {dtype} not supported.".format(dtype=dtype)
)
class StataWriter(StataParser):
"""
A class for writing Stata binary dta files
Parameters
----------
fname : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or
        object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
is written.
.. versionadded:: 0.23.0 support for pathlib, py.path.
data : DataFrame
Input to save
convert_dates : dict
Dictionary mapping columns containing datetime types to stata internal
format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Only latin-1 and ascii are supported.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current time
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Returns
-------
writer : StataWriter instance
The StataWriter instance has a write_file method, which will
write the file to the given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
or datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
>>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> from datetime import datetime
>>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
>>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'})
>>> writer.write_file()
"""
_max_string_length = 244
@deprecate_kwarg(old_arg_name="encoding", new_arg_name=None)
def __init__(
self,
fname,
data,
convert_dates=None,
write_index=True,
encoding="latin-1",
byteorder=None,
time_stamp=None,
data_label=None,
variable_labels=None,
):
super().__init__()
self._convert_dates = {} if convert_dates is None else convert_dates
self._write_index = write_index
self._encoding = "latin-1"
self._time_stamp = time_stamp
self._data_label = data_label
self._variable_labels = variable_labels
self._own_file = True
# attach nobs, nvars, data, varlist, typlist
self._prepare_pandas(data)
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
self._fname = _stringify_path(fname)
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
self._converted_names = {}
def _write(self, to_write):
"""
Helper to call encode before writing to file for Python 3 compat.
"""
self._file.write(to_write.encode(self._encoding or self._default_encoding))
def _prepare_categoricals(self, data):
"""Check for categorical columns, retain categorical information for
Stata file and convert categorical data to int"""
is_cat = [is_categorical_dtype(data[col]) for col in data]
self._is_col_cat = is_cat
self._value_labels = []
if not any(is_cat):
return data
get_base_missing_value = StataMissingValue.get_base_missing_value
data_formatted = []
for col, col_is_cat in zip(data, is_cat):
if col_is_cat:
self._value_labels.append(StataValueLabel(data[col]))
dtype = data[col].cat.codes.dtype
if dtype == np.int64:
raise ValueError(
"It is not possible to export "
"int64-based categorical data to Stata."
)
values = data[col].cat.codes.values.copy()
# Upcast if needed so that correct missing values can be set
if values.max() >= get_base_missing_value(dtype):
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
else:
dtype = np.float64
values = np.array(values, dtype=dtype)
# Replace missing values with Stata missing value for type
values[values == -1] = get_base_missing_value(dtype)
data_formatted.append((col, values))
else:
data_formatted.append((col, data[col]))
return DataFrame.from_dict(OrderedDict(data_formatted))
def _replace_nans(self, data):
# return data
"""Checks floating point data columns for nans, and replaces these with
the generic Stata for missing value (.)"""
for c in data:
dtype = data[c].dtype
if dtype in (np.float32, np.float64):
if dtype == np.float32:
replacement = self.MISSING_VALUES["f"]
else:
replacement = self.MISSING_VALUES["d"]
data[c] = data[c].fillna(replacement)
return data
def _update_strl_names(self):
"""No-op, forward compatibility"""
pass
def _check_column_names(self, data):
"""
Checks column names to ensure that they are valid Stata column names.
This includes checks for:
* Non-string names
* Stata keywords
* Variables that start with numbers
* Variables with names that are too long
When an illegal variable name is detected, it is converted, and if
dates are exported, the variable name is propagated to the date
conversion dictionary
"""
converted_names = {}
columns = list(data.columns)
original_columns = columns[:]
duplicate_var_id = 0
for j, name in enumerate(columns):
orig_name = name
if not isinstance(name, str):
name = str(name)
for c in name:
if (
(c < "A" or c > "Z")
and (c < "a" or c > "z")
and (c < "0" or c > "9")
and c != "_"
):
name = name.replace(c, "_")
# Variable name must not be a reserved word
if name in self.RESERVED_WORDS:
name = "_" + name
# Variable name may not start with a number
if name[0] >= "0" and name[0] <= "9":
name = "_" + name
name = name[: min(len(name), 32)]
if not name == orig_name:
# check for duplicates
while columns.count(name) > 0:
# prepend ascending number to avoid duplicates
name = "_" + str(duplicate_var_id) + name
name = name[: min(len(name), 32)]
duplicate_var_id += 1
converted_names[orig_name] = name
columns[j] = name
data.columns = columns
# Check date conversion, and fix key if needed
if self._convert_dates:
for c, o in zip(columns, original_columns):
if c != o:
self._convert_dates[c] = self._convert_dates[o]
del self._convert_dates[o]
if converted_names:
conversion_warning = []
for orig_name, name in converted_names.items():
# need to possibly encode the orig name if its unicode
try:
orig_name = orig_name.encode("utf-8")
except (UnicodeDecodeError, AttributeError):
pass
msg = "{0} -> {1}".format(orig_name, name)
conversion_warning.append(msg)
ws = invalid_name_doc.format("\n ".join(conversion_warning))
warnings.warn(ws, InvalidColumnName)
self._converted_names = converted_names
self._update_strl_names()
return data
def _set_formats_and_types(self, data, dtypes):
self.typlist = []
self.fmtlist = []
for col, dtype in dtypes.items():
self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, data[col]))
self.typlist.append(_dtype_to_stata_type(dtype, data[col]))
def _prepare_pandas(self, data):
# NOTE: we might need a different API / class for pandas objects so
# we can set different semantics - handle this with a PR to pandas.io
data = data.copy()
if self._write_index:
data = data.reset_index()
# Ensure column names are strings
data = self._check_column_names(data)
# Check columns for compatibility with stata, upcast if necessary
# Raise if outside the supported range
data = _cast_to_stata_types(data)
# Replace NaNs with Stata missing values
data = self._replace_nans(data)
# Convert categoricals to int data, and strip labels
data = self._prepare_categoricals(data)
self.nobs, self.nvar = data.shape
self.data = data
self.varlist = data.columns.tolist()
dtypes = data.dtypes
# Ensure all date columns are converted
for col in data:
if col in self._convert_dates:
continue
if is_datetime64_dtype(data[col]):
self._convert_dates[col] = "tc"
self._convert_dates = _maybe_convert_to_int_keys(
self._convert_dates, self.varlist
)
for key in self._convert_dates:
new_type = _convert_datetime_to_stata_type(self._convert_dates[key])
dtypes[key] = np.dtype(new_type)
self._set_formats_and_types(data, dtypes)
# set the given format for the datetime cols
if self._convert_dates is not None:
for key in self._convert_dates:
self.fmtlist[key] = self._convert_dates[key]
def write_file(self):
self._file, self._own_file = _open_file_binary_write(self._fname)
try:
self._write_header(time_stamp=self._time_stamp, data_label=self._data_label)
self._write_map()
self._write_variable_types()
self._write_varnames()
self._write_sortlist()
self._write_formats()
self._write_value_label_names()
self._write_variable_labels()
self._write_expansion_fields()
self._write_characteristics()
self._prepare_data()
self._write_data()
self._write_strls()
self._write_value_labels()
self._write_file_close_tag()
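            # Second pass over the map: the 117 writer rewrites the section
            # offsets here now that every block position is known (this is a
            # no-op for format 114).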
self._write_map()
except Exception as exc:
self._close()
try:
if self._own_file:
os.unlink(self._fname)
except Exception:
warnings.warn(
"This save was not successful but {0} could not "
"be deleted. This file is not "
"valid.".format(self._fname),
ResourceWarning,
)
raise exc
else:
self._close()
def _close(self):
"""
Close the file if it was created by the writer.
If a buffer or file-like object was passed in, for example a GzipFile,
then leave this file open for the caller to close. In either case,
attempt to flush the file contents to ensure they are written to disk
(if supported)
"""
# Some file-like objects might not support flush
try:
self._file.flush()
except AttributeError:
pass
if self._own_file:
self._file.close()
def _write_map(self):
"""No-op, future compatibility"""
pass
def _write_file_close_tag(self):
"""No-op, future compatibility"""
pass
def _write_characteristics(self):
"""No-op, future compatibility"""
pass
def _write_strls(self):
"""No-op, future compatibility"""
pass
def _write_expansion_fields(self):
"""Write 5 zeros for expansion fields"""
self._write(_pad_bytes("", 5))
def _write_value_labels(self):
for vl in self._value_labels:
self._file.write(vl.generate_value_label(self._byteorder, self._encoding))
def _write_header(self, data_label=None, time_stamp=None):
byteorder = self._byteorder
# ds_format - just use 114
self._file.write(struct.pack("b", 114))
# byteorder
self._write(byteorder == ">" and "\x01" or "\x02")
# filetype
self._write("\x01")
# unused
self._write("\x00")
# number of vars, 2 bytes
self._file.write(struct.pack(byteorder + "h", self.nvar)[:2])
# number of obs, 4 bytes
self._file.write(struct.pack(byteorder + "i", self.nobs)[:4])
# data label 81 bytes, char, null terminated
if data_label is None:
self._file.write(self._null_terminate(_pad_bytes("", 80)))
else:
self._file.write(self._null_terminate(_pad_bytes(data_label[:80], 80)))
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
# GH #13856
# Avoid locale-specific month conversion
months = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
month_lookup = {i + 1: month for i, month in enumerate(months)}
ts = (
time_stamp.strftime("%d ")
+ month_lookup[time_stamp.month]
+ time_stamp.strftime(" %Y %H:%M")
)
self._file.write(self._null_terminate(ts))
def _write_variable_types(self):
for typ in self.typlist:
self._file.write(struct.pack("B", typ))
def _write_varnames(self):
# varlist names are checked by _check_column_names
# varlist, requires null terminated
for name in self.varlist:
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
def _write_sortlist(self):
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", 2 * (self.nvar + 1))
self._write(srtlist)
def _write_formats(self):
# fmtlist, 49*nvar, char array
for fmt in self.fmtlist:
self._write(_pad_bytes(fmt, 49))
def _write_value_label_names(self):
# lbllist, 33*nvar, char array
for i in range(self.nvar):
# Use variable name when categorical
if self._is_col_cat[i]:
name = self.varlist[i]
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
else: # Default is empty label
self._write(_pad_bytes("", 33))
def _write_variable_labels(self):
# Missing labels are 80 blank characters plus null termination
blank = _pad_bytes("", 81)
if self._variable_labels is None:
for i in range(self.nvar):
self._write(blank)
return
for col in self.data:
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
raise ValueError(
"Variable labels must be 80 characters " "or fewer"
)
is_latin1 = all(ord(c) < 256 for c in label)
if not is_latin1:
raise ValueError(
"Variable labels must contain only "
"characters that can be encoded in "
"Latin-1"
)
self._write(_pad_bytes(label, 81))
else:
self._write(blank)
def _convert_strls(self, data):
"""No-op, future compatibility"""
return data
def _prepare_data(self):
data = self.data
typlist = self.typlist
convert_dates = self._convert_dates
# 1. Convert dates
if self._convert_dates is not None:
for i, col in enumerate(data):
if i in convert_dates:
data[col] = _datetime_to_stata_elapsed_vec(
data[col], self.fmtlist[i]
)
# 2. Convert strls
data = self._convert_strls(data)
# 3. Convert bad string data to '' and pad to correct length
dtypes = {}
native_byteorder = self._byteorder == _set_endianness(sys.byteorder)
for i, col in enumerate(data):
typ = typlist[i]
if typ <= self._max_string_length:
data[col] = data[col].fillna("").apply(_pad_bytes, args=(typ,))
stype = "S{type}".format(type=typ)
dtypes[col] = stype
data[col] = data[col].str.encode(self._encoding).astype(stype)
else:
dtype = data[col].dtype
if not native_byteorder:
dtype = dtype.newbyteorder(self._byteorder)
dtypes[col] = dtype
self.data = data.to_records(index=False, column_dtypes=dtypes)
def _write_data(self):
data = self.data
self._file.write(data.tobytes())
def _null_terminate(self, s, as_string=False):
null_byte = "\x00"
s += null_byte
if not as_string:
s = s.encode(self._encoding)
return s
def _dtype_to_stata_type_117(dtype, column, force_strl):
"""
    Converts numpy dtypes to Stata 117 type codes. Returns the integer type
    code for the given dtype. See TYPE_MAP and comments for an explanation.
    This is also explained in the dta spec.
    1 - 2045 are strings of this length
                Pandas    Stata
    32768 - for object    strL
    65526 - for float64   double
    65527 - for float32   float
    65528 - for int32     long
    65529 - for int16     int
    65530 - for int8      byte
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if force_strl:
return 32768
if dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(ensure_object(column.values))
itemsize = max(itemsize, 1)
if itemsize <= 2045:
return itemsize
return 32768
elif dtype == np.float64:
return 65526
elif dtype == np.float32:
return 65527
elif dtype == np.int32:
return 65528
elif dtype == np.int16:
return 65529
elif dtype == np.int8:
return 65530
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
def _pad_bytes_new(name, length):
"""
    Takes a bytes instance and pads it with null bytes until it is `length` characters long.
"""
if isinstance(name, str):
name = bytes(name, "utf-8")
return name + b"\x00" * (length - len(name))
class StataStrLWriter:
"""
Converter for Stata StrLs
Stata StrLs map 8 byte values to strings which are stored using a
dictionary-like format where strings are keyed to two values.
Parameters
----------
df : DataFrame
DataFrame to convert
columns : list
List of columns names to convert to StrL
version : int, optional
dta version. Currently supports 117, 118 and 119
byteorder : str, optional
Can be ">", "<", "little", or "big". default is `sys.byteorder`
Notes
-----
Supports creation of the StrL block of a dta file for dta versions
117, 118 and 119. These differ in how the GSO is stored. 118 and
119 store the GSO lookup value as a uint32 and a uint64, while 117
uses two uint32s. 118 and 119 also encode all strings as unicode
    which is required by the format. 117 uses 'latin-1', a fixed-width
encoding that extends the 7-bit ascii table with an additional 128
characters.
"""
def __init__(self, df, columns, version=117, byteorder=None):
if version not in (117, 118, 119):
raise ValueError("Only dta versions 117, 118 and 119 supported")
self._dta_ver = version
self.df = df
self.columns = columns
self._gso_table = OrderedDict((("", (0, 0)),))
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
gso_v_type = "I" # uint32
gso_o_type = "Q" # uint64
self._encoding = "utf-8"
if version == 117:
o_size = 4
gso_o_type = "I" # 117 used uint32
self._encoding = "latin-1"
elif version == 118:
o_size = 6
else: # version == 119
o_size = 5
self._o_offet = 2 ** (8 * (8 - o_size))
self._gso_o_type = gso_o_type
self._gso_v_type = gso_v_type
def _convert_key(self, key):
v, o = key
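        # Pack (v, o) into a single integer: v occupies the low bits and o is
        # shifted into the high bits via _o_offet (see the generate_table notes).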
return v + self._o_offet * o
def generate_table(self):
"""
        Generates the GSO lookup table for the DataFrame
Returns
-------
gso_table : OrderedDict
Ordered dictionary using the string found as keys
and their lookup position (v,o) as values
gso_df : DataFrame
DataFrame where strl columns have been converted to
(v,o) values
Notes
-----
Modifies the DataFrame in-place.
The DataFrame returned encodes the (v,o) values as uint64s. The
        encoding depends on the dta version, and can be expressed as
enc = v + o * 2 ** (o_size * 8)
so that v is stored in the lower bits and o is in the upper
bits. o_size is
* 117: 4
* 118: 6
* 119: 5
"""
gso_table = self._gso_table
gso_df = self.df
columns = list(gso_df.columns)
selected = gso_df[self.columns]
col_index = [(col, columns.index(col)) for col in self.columns]
keys = np.empty(selected.shape, dtype=np.uint64)
for o, (idx, row) in enumerate(selected.iterrows()):
for j, (col, v) in enumerate(col_index):
val = row[col]
# Allow columns with mixed str and None (GH 23633)
val = "" if val is None else val
key = gso_table.get(val, None)
if key is None:
# Stata prefers human numbers
key = (v + 1, o + 1)
gso_table[val] = key
keys[o, j] = self._convert_key(key)
for i, col in enumerate(self.columns):
gso_df[col] = keys[:, i]
return gso_table, gso_df
def _encode(self, s):
"""
Python 3 compatibility shim
"""
return s.encode(self._encoding)
def generate_blob(self, gso_table):
"""
Generates the binary blob of GSOs that is written to the dta file.
Parameters
----------
gso_table : OrderedDict
Ordered dictionary (str, vo)
Returns
-------
gso : bytes
Binary content of dta file to be placed between strl tags
Notes
-----
Output format depends on dta version. 117 uses two uint32s to
express v and o while 118+ uses a uint32 for v and a uint64 for o.
"""
# Format information
# Length includes null term
# 117
# GSOvvvvooootllllxxxxxxxxxxxxxxx...x
# 3 u4 u4 u1 u4 string + null term
#
# 118, 119
# GSOvvvvooooooootllllxxxxxxxxxxxxxxx...x
# 3 u4 u8 u1 u4 string + null term
bio = BytesIO()
gso = bytes("GSO", "ascii")
gso_type = struct.pack(self._byteorder + "B", 130)
null = struct.pack(self._byteorder + "B", 0)
v_type = self._byteorder + self._gso_v_type
o_type = self._byteorder + self._gso_o_type
len_type = self._byteorder + "I"
for strl, vo in gso_table.items():
if vo == (0, 0):
continue
v, o = vo
# GSO
bio.write(gso)
# vvvv
bio.write(struct.pack(v_type, v))
# oooo / oooooooo
bio.write(struct.pack(o_type, o))
# t
bio.write(gso_type)
# llll
utf8_string = bytes(strl, "utf-8")
bio.write(struct.pack(len_type, len(utf8_string) + 1))
# xxx...xxx
bio.write(utf8_string)
bio.write(null)
bio.seek(0)
return bio.read()
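# Illustrative usage sketch (column name "notes" and the data are hypothetical);
# StataWriter117._convert_strls below drives the writer the same way:
#
#   ssw = StataStrLWriter(df, ["notes"], version=117)
#   gso_table, df = ssw.generate_table()      # strings replaced by (v, o) keys
#   strl_blob = ssw.generate_blob(gso_table)  # bytes for the <strls> section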
class StataWriter117(StataWriter):
"""
A class for writing Stata binary dta files in Stata 13 format (117)
.. versionadded:: 0.23.0
Parameters
----------
fname : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() functions. If using a buffer
then the buffer will not be automatically closed after the file
is written.
data : DataFrame
Input to save
convert_dates : dict
Dictionary mapping columns containing datetime types to stata internal
format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Only latin-1 and ascii are supported.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current time
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
convert_strl : list
List of columns names to convert to Stata StrL format. Columns with
more than 2045 characters are automatically written as StrL.
Smaller columns can be converted by including the column name. Using
StrLs can reduce output file size when strings are longer than 8
characters, and either frequently repeated or sparse.
Returns
-------
writer : StataWriter117 instance
The StataWriter117 instance has a write_file method, which will
write the file to the given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
or datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
>>> from pandas.io.stata import StataWriter117
>>> data = pd.DataFrame([[1.0, 1, 'a']], columns=['a', 'b', 'c'])
>>> writer = StataWriter117('./data_file.dta', data)
>>> writer.write_file()
Or with long strings stored in strl format
>>> data = pd.DataFrame([['A relatively long string'], [''], ['']],
... columns=['strls'])
>>> writer = StataWriter117('./data_file_with_long_strings.dta', data,
... convert_strl=['strls'])
>>> writer.write_file()
"""
_max_string_length = 2045
@deprecate_kwarg(old_arg_name="encoding", new_arg_name=None)
def __init__(
self,
fname,
data,
convert_dates=None,
write_index=True,
encoding="latin-1",
byteorder=None,
time_stamp=None,
data_label=None,
variable_labels=None,
convert_strl=None,
):
# Shallow copy since convert_strl might be modified later
self._convert_strl = [] if convert_strl is None else convert_strl[:]
super().__init__(
fname,
data,
convert_dates,
write_index,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
variable_labels=variable_labels,
)
self._map = None
self._strl_blob = None
@staticmethod
def _tag(val, tag):
"""Surround val with <tag></tag>"""
if isinstance(val, str):
val = bytes(val, "utf-8")
return bytes("<" + tag + ">", "utf-8") + val + bytes("</" + tag + ">", "utf-8")
def _update_map(self, tag):
"""Update map location for tag with file position"""
self._map[tag] = self._file.tell()
def _write_header(self, data_label=None, time_stamp=None):
"""Write the file header"""
byteorder = self._byteorder
self._file.write(bytes("<stata_dta>", "utf-8"))
bio = BytesIO()
# ds_format - 117
bio.write(self._tag(bytes("117", "utf-8"), "release"))
# byteorder
bio.write(self._tag(byteorder == ">" and "MSF" or "LSF", "byteorder"))
# number of vars, 2 bytes
assert self.nvar < 2 ** 16
bio.write(self._tag(struct.pack(byteorder + "H", self.nvar), "K"))
# number of obs, 4 bytes
bio.write(self._tag(struct.pack(byteorder + "I", self.nobs), "N"))
# data label 81 bytes, char, null terminated
label = data_label[:80] if data_label is not None else ""
label_len = struct.pack(byteorder + "B", len(label))
label = label_len + bytes(label, "utf-8")
bio.write(self._tag(label, "label"))
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
# Avoid locale-specific month conversion
months = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
month_lookup = {i + 1: month for i, month in enumerate(months)}
ts = (
time_stamp.strftime("%d ")
+ month_lookup[time_stamp.month]
+ time_stamp.strftime(" %Y %H:%M")
)
# '\x11' added due to inspection of Stata file
ts = b"\x11" + bytes(ts, "utf8")
bio.write(self._tag(ts, "timestamp"))
bio.seek(0)
self._file.write(self._tag(bio.read(), "header"))
def _write_map(self):
"""Called twice during file write. The first populates the values in
the map with 0s. The second call writes the final map locations when
all blocks have been written."""
if self._map is None:
self._map = OrderedDict(
(
("stata_data", 0),
("map", self._file.tell()),
("variable_types", 0),
("varnames", 0),
("sortlist", 0),
("formats", 0),
("value_label_names", 0),
("variable_labels", 0),
("characteristics", 0),
("data", 0),
("strls", 0),
("value_labels", 0),
("stata_data_close", 0),
("end-of-file", 0),
)
)
# Move to start of map
self._file.seek(self._map["map"])
bio = BytesIO()
for val in self._map.values():
bio.write(struct.pack(self._byteorder + "Q", val))
bio.seek(0)
self._file.write(self._tag(bio.read(), "map"))
def _write_variable_types(self):
self._update_map("variable_types")
bio = BytesIO()
for typ in self.typlist:
bio.write(struct.pack(self._byteorder + "H", typ))
bio.seek(0)
self._file.write(self._tag(bio.read(), "variable_types"))
def _write_varnames(self):
self._update_map("varnames")
bio = BytesIO()
for name in self.varlist:
name = self._null_terminate(name, True)
name = _pad_bytes_new(name[:32], 33)
bio.write(name)
bio.seek(0)
self._file.write(self._tag(bio.read(), "varnames"))
def _write_sortlist(self):
self._update_map("sortlist")
self._file.write(self._tag(b"\x00\00" * (self.nvar + 1), "sortlist"))
def _write_formats(self):
self._update_map("formats")
bio = BytesIO()
for fmt in self.fmtlist:
bio.write(_pad_bytes_new(fmt, 49))
bio.seek(0)
self._file.write(self._tag(bio.read(), "formats"))
def _write_value_label_names(self):
self._update_map("value_label_names")
bio = BytesIO()
for i in range(self.nvar):
# Use variable name when categorical
name = "" # default name
if self._is_col_cat[i]:
name = self.varlist[i]
name = self._null_terminate(name, True)
name = _pad_bytes_new(name[:32], 33)
bio.write(name)
bio.seek(0)
self._file.write(self._tag(bio.read(), "value_label_names"))
def _write_variable_labels(self):
# Missing labels are 80 blank characters plus null termination
self._update_map("variable_labels")
bio = BytesIO()
blank = _pad_bytes_new("", 81)
if self._variable_labels is None:
for _ in range(self.nvar):
bio.write(blank)
bio.seek(0)
self._file.write(self._tag(bio.read(), "variable_labels"))
return
for col in self.data:
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
raise ValueError(
"Variable labels must be 80 characters " "or fewer"
)
is_latin1 = all(ord(c) < 256 for c in label)
if not is_latin1:
raise ValueError(
"Variable labels must contain only "
"characters that can be encoded in "
"Latin-1"
)
bio.write(_pad_bytes_new(label, 81))
else:
bio.write(blank)
bio.seek(0)
self._file.write(self._tag(bio.read(), "variable_labels"))
def _write_characteristics(self):
self._update_map("characteristics")
self._file.write(self._tag(b"", "characteristics"))
def _write_data(self):
self._update_map("data")
data = self.data
self._file.write(b"<data>")
self._file.write(data.tobytes())
self._file.write(b"</data>")
def _write_strls(self):
self._update_map("strls")
strls = b""
if self._strl_blob is not None:
strls = self._strl_blob
self._file.write(self._tag(strls, "strls"))
def _write_expansion_fields(self):
"""No-op in dta 117+"""
pass
def _write_value_labels(self):
self._update_map("value_labels")
bio = BytesIO()
for vl in self._value_labels:
lab = vl.generate_value_label(self._byteorder, self._encoding)
lab = self._tag(lab, "lbl")
bio.write(lab)
bio.seek(0)
self._file.write(self._tag(bio.read(), "value_labels"))
def _write_file_close_tag(self):
self._update_map("stata_data_close")
self._file.write(bytes("</stata_dta>", "utf-8"))
self._update_map("end-of-file")
def _update_strl_names(self):
"""Update column names for conversion to strl if they might have been
changed to comply with Stata naming rules"""
# Update convert_strl if names changed
for orig, new in self._converted_names.items():
if orig in self._convert_strl:
idx = self._convert_strl.index(orig)
self._convert_strl[idx] = new
def _convert_strls(self, data):
"""Convert columns to StrLs if either very large or in the
convert_strl variable"""
convert_cols = [
col
for i, col in enumerate(data)
if self.typlist[i] == 32768 or col in self._convert_strl
]
if convert_cols:
ssw = StataStrLWriter(data, convert_cols)
tab, new_data = ssw.generate_table()
data = new_data
self._strl_blob = ssw.generate_blob(tab)
return data
def _set_formats_and_types(self, data, dtypes):
self.typlist = []
self.fmtlist = []
for col, dtype in dtypes.items():
force_strl = col in self._convert_strl
fmt = _dtype_to_default_stata_fmt(
dtype, data[col], dta_version=117, force_strl=force_strl
)
self.fmtlist.append(fmt)
self.typlist.append(_dtype_to_stata_type_117(dtype, data[col], force_strl))
| apache-2.0 |
JelteF/statistics | 4/pca.py | 1 | 2669 | import numpy as np
from scipy.misc import imread
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import os.path
from pylatex import Subsection, Plt, Figure
n = 256
m = 25
def compare(img, pos, mean_vec, d, U, k, show=False):
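    # Reconstruct the m x m patch at `pos` from its first k principal
    # components: subtract the mean vector, project onto the k leading
    # eigenvectors in U, back-project, and add the mean back.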
x, y = pos
detail = img[x:x+m, y:y+m].reshape((m*m, 1))
det_zm = detail - mean_vec
y_zm = U.T.dot(det_zm)
y_zm_k = y_zm[:k]
det_zm_k = U[:, :k].dot(y_zm_k)
det_k = det_zm_k + mean_vec
if show:
plt.subplot(2, 1, 1)
plt.imshow(detail.reshape((m, m)), cmap=cm.Greys_r)
plt.subplot(2, 1, 2)
plt.imshow(det_k.reshape((m, m)), cmap=cm.Greys_r)
plt.show()
return det_k
def sorted_eig(mat):
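    # Eigendecompose `mat` and return eigenvalues/eigenvectors ordered by
    # decreasing eigenvalue, so the leading principal components come first.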
d, U = np.linalg.eig(mat)
si = np.argsort(d)[-1::-1]
d = d[si]
U = U[:, si]
return d, U
def main():
img = imread('trui.png')
if not os.path.isfile('d.npy'):
sum_mat = np.zeros((m*m, m*m), np.int)
mean_sum_vec = np.zeros((m*m, 1), np.int)
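        # Accumulate the outer products and sums of all m x m patches so that
        # the covariance-like matrix S and the mean vector can be formed
        # without storing every patch explicitly.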
for i in range(n-m+1):
for j in range(n-m+1):
detail = img[i:i+m, j:j+m].reshape((m*m, 1))
sum_mat += detail * detail.T
mean_sum_vec += detail
mean_vec = mean_sum_vec / float(n*n)
mean_mat = n * mean_vec * mean_vec.T
S = (sum_mat - mean_mat) / ((m*m)-1)
d, U = sorted_eig(S)
np.save('d', d)
np.save('U', U)
np.save('mean_vec', mean_vec)
else:
d = np.load('d.npy')
U = np.load('U.npy')
mean_vec = np.load('mean_vec.npy')
plt.bar(range(6), abs(d[:6]))
# plt.show()
with open('scree.tex', 'w') as f:
plot = Plt(position='htbp')
plot.add_plot(plt)
plot.add_caption('Scree diagram')
plot.dump(f)
sec = Subsection('Gereconstrueerde foto\'s')
with sec.create(Figure(position='htbp')) as fig:
fig.add_image('trui.png')
fig.add_caption('Origineel')
for k in [0, 1, 3, 5, 7, 10, 20, 30, 50, 80, 120, 170,
220, 300, 370, 450, 520, 590, 625]:
reconstructed = np.zeros((n, n))
for i in range(0, 232, 25):
for j in range(0, 232, 25):
subimg = compare(img, (i, j), mean_vec, d, U, k)
reconstructed[i:i+25, j:j+25] = subimg.reshape((25, 25))
plt.imshow(reconstructed, cmap=cm.Greys_r)
plt.title('k = ' + str(k))
# plt.show()
with sec.create(Plt(position='htbp')) as plot:
plot.add_plot(plt)
plot.add_caption('k = ' + str(k))
with open('images.tex', 'w') as f:
sec.dump(f)
if __name__ == '__main__':
main()
| mit |
Obus/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso makes it possible to fit multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
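# Fit one independent Lasso per task (each column of Y) and a single
# MultiTaskLasso on all tasks jointly, then compare which coefficients
# each approach selects.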
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
broesamle/servo | tests/heartbeats/process_logs.py | 139 | 16143 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from os import path
import sys
import warnings
HB_LOG_IDX_START_TIME = 7
HB_LOG_IDX_END_TIME = HB_LOG_IDX_START_TIME + 1
HB_LOG_IDX_START_ENERGY = 14
HB_LOG_IDX_END_ENERGY = HB_LOG_IDX_START_ENERGY + 1
ENERGY_PROFILER_NAME = 'ApplicationHeartbeat'
SUMMARY_OUTPUT = "summary.txt"
SUMMARY_TIME_IDX = 8
SUMMARY_ENERGY_IDX = SUMMARY_TIME_IDX + 1
SUMMARY_POWER_IDX = SUMMARY_ENERGY_IDX + 1
def autolabel(rects, ax):
"""Attach some text labels.
"""
for rect in rects:
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * rect.get_height(), '', ha='center', va='bottom')
def plot_raw_totals(config, plot_data, max_time, max_time_std, max_energy, max_energy_std, output_dir, normalize):
"""Plot the raw totals for a configuration.
Keyword arguments:
config -- configuration name
plot_data -- (profiler name, total_time, total_time_std, total_energy, total_energy_std)
max_time, max_time_std, max_energy, max_energy_std -- single values
normalize -- True/False
"""
plot_data = sorted(plot_data)
keys = [p for (p, tt, tts, te, tes) in plot_data]
total_times = [tt for (p, tt, tts, te, tes) in plot_data]
total_times_std = [tts for (p, tt, tts, te, tes) in plot_data]
total_energies = [te for (p, tt, tts, te, tes) in plot_data]
total_energies_std = [tes for (p, tt, tts, te, tes) in plot_data]
fig, ax1 = plt.subplots()
ind = np.arange(len(keys)) # the x locations for the groups
width = 0.35 # the width of the bars
# add some text for labels, title and axes ticks
ax1.set_title('Time/Energy Data for Configuration ' + config)
ax1.set_xticks(ind + width)
ax1.set_xticklabels(keys, rotation=45)
fig.set_tight_layout(True)
fig.set_size_inches(len(plot_data) / 1.5, 8)
ax2 = ax1.twinx()
# Normalize
if normalize:
total_times_std /= np.sum(total_times)
total_times /= np.sum(total_times)
total_energies_std /= np.sum(total_energies)
total_energies /= np.sum(total_energies)
ax1.set_ylabel('Time (Normalized)')
ax2.set_ylabel('Energy (Normalized)')
else:
# set time in us instead of ns
total_times_std /= np.array(1000000.0)
total_times /= np.array(1000000.0)
total_energies_std /= np.array(1000000.0)
total_energies /= np.array(1000000.0)
ax1.set_ylabel('Time (ms)')
ax2.set_ylabel('Energy (Joules)')
rects1 = ax1.bar(ind, total_times, width, color='r', yerr=total_times_std)
rects2 = ax2.bar(ind + width, total_energies, width, color='y', yerr=total_energies_std)
ax1.legend([rects1[0], rects2[0]], ['Time', 'Energy'])
# set axis
x1, x2, y1, y2 = plt.axis()
if normalize:
ax1.set_ylim(ymin=0, ymax=1)
ax2.set_ylim(ymin=0, ymax=1)
else:
ax1.set_ylim(ymin=0, ymax=((max_time + max_time_std) * 1.25 / 1000000.0))
ax2.set_ylim(ymin=0, ymax=((max_energy + max_energy_std) * 1.25 / 1000000.0))
autolabel(rects1, ax1)
autolabel(rects2, ax2)
# plt.show()
plt.savefig(path.join(output_dir, config + ".png"))
plt.close(fig)
def create_raw_total_data(config_data):
"""Get the raw data to plot for a configuration
Return: [(profiler, time_mean, time_stddev, energy_mean, energy_stddev)]
Keyword arguments:
config_data -- (trial, trial_data)
"""
# We can't assume that the same number of heartbeats are always issued across trials
# key: profiler name; value: list of timing sums for each trial
profiler_total_times = {}
# key: profiler name; value: list of energy sums for each trial
profiler_total_energies = {}
for (t, td) in config_data:
for (profiler, ts, te, es, ee) in td:
# sum the total times and energies for each profiler in this trial
total_time = np.sum(te - ts)
total_energy = np.sum(ee - es)
# add to list to be averaged later
time_list = profiler_total_times.get(profiler, [])
time_list.append(total_time)
profiler_total_times[profiler] = time_list
energy_list = profiler_total_energies.get(profiler, [])
energy_list.append(total_energy)
profiler_total_energies[profiler] = energy_list
# Get mean and stddev for time and energy totals
return [(profiler,
np.mean(profiler_total_times[profiler]),
np.std(profiler_total_times[profiler]),
np.mean(profiler_total_energies[profiler]),
np.std(profiler_total_energies[profiler]))
for profiler in profiler_total_times.keys()]
def plot_all_raw_totals(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
raw_total_norm_out_dir = path.join(output_dir, 'raw_totals_normalized')
os.makedirs(raw_total_norm_out_dir)
raw_total_out_dir = path.join(output_dir, 'raw_totals')
os.makedirs(raw_total_out_dir)
# (name, (profiler, (time_mean, time_stddev, energy_mean, energy_stddev)))
raw_totals_data = [(config, create_raw_total_data(config_data)) for (config, config_data) in config_list]
mean_times = []
mean_times_std = []
mean_energies = []
mean_energies_std = []
for profiler_tup in [config_tup[1] for config_tup in raw_totals_data]:
for (p, tt, tts, te, tes) in profiler_tup:
mean_times.append(tt)
mean_times_std.append(tts)
mean_energies.append(te)
mean_energies_std.append(tes)
# get consistent max time/energy values across plots
max_t = np.max(mean_times)
max_t_std = np.max(mean_times_std)
max_e = np.max(mean_energies)
max_e_std = np.max(mean_energies_std)
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_norm_out_dir, True)
for data in raw_totals_data]
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_out_dir, False)
for data in raw_totals_data]
def plot_trial_time_series(config, trial, trial_data, max_end_time, max_power, output_dir):
"""Plot time series for a single trial.
Keyword arguments:
config -- the config name
trial -- the trial name
trial_data -- [(profiler, [start times], [end times], [start energies], [end energies])]
max_end_time -- single value to use as max X axis value (for consistency across trials)
output_dir -- the output directory
"""
# TODO: Some profilers may have parallel tasks - need to identify this on plots
max_end_time = max_end_time / 1000000.0
trial_data = sorted(trial_data)
fig, ax1 = plt.subplots()
keys = [p for (p, ts, te, es, ee) in trial_data]
# add some text for labels, title and axes ticks
ax1.set_title('Profiler Activity for ' + config + ', ' + trial)
ax1.set_xlabel('Time (ms)')
ax1.grid(True)
width = 8 # the width of the bars
ax1.set_yticks(10 * np.arange(1, len(keys) + 2))
ax1.set_yticklabels(keys)
ax1.set_ylim(ymin=0, ymax=((len(trial_data) + 1) * 10))
ax1.set_xlim(xmin=0, xmax=max_end_time)
fig.set_tight_layout(True)
fig.set_size_inches(16, len(trial_data) / 3)
i = 10
for (p, ts, te, es, ee) in trial_data:
xranges = [(ts[j] / 1000000.0, (te[j] - ts[j]) / 1000000.0) for j in xrange(len(ts))]
ax1.broken_barh(xranges, (i - 0.5 * width, width))
i += 10
# place a vbar at the final time for this trial
last_profiler_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in trial_data]))
plt.axvline(np.max(last_profiler_times) / 1000000.0, color='black')
power_times = []
power_values = []
for (p, ts, te, es, ee) in trial_data:
if p == ENERGY_PROFILER_NAME:
power_times = te / 1000000.0
power_values = (ee - es) / ((te - ts) / 1000.0)
ax2 = ax1.twinx()
ax2.set_xlim(xmin=0, xmax=max_end_time)
ax2.set_ylim(ymin=0, ymax=max_power)
ax2.set_ylabel('Power (Watts)')
ax2.plot(power_times, power_values, color='r')
# plt.show()
plt.savefig(path.join(output_dir, "ts_" + config + "_" + trial + ".png"))
plt.close(fig)
def hb_energy_times_to_power(es, ee, ts, te):
"""Compute power from start and end energy and times.
Return: power values
"""
return (ee - es) / ((te - ts) / 1000.0)
def plot_all_time_series(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
time_series_out_dir = path.join(output_dir, 'time_series')
os.makedirs(time_series_out_dir)
max_end_times = []
max_power_values = []
for (c, cd) in config_list:
for (t, td) in cd:
trial_max_end_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in td]))
max_end_times.append(np.nanmax(trial_max_end_times))
for (p, ts, te, es, ee) in td:
# We only care about the energy profiler (others aren't reliable for instant power anyway)
if p == ENERGY_PROFILER_NAME and len(te) > 0:
max_power_values.append(np.nanmax(hb_energy_times_to_power(es, ee, ts, te)))
max_time = np.nanmax(max_end_times)
max_power = np.nanmax(np.array(max_power_values)) * 1.2 # leave a little space at the top
for (config, config_data) in config_list:
[plot_trial_time_series(config, trial, trial_data, max_time, max_power, time_series_out_dir)
for (trial, trial_data) in config_data]
def read_heartbeat_log(profiler_hb_log):
"""Read a heartbeat log file.
    Return: (profiler name, [start times], [end times], [start energies], [end energies])
Keyword arguments:
profiler_hb_log -- the file to read
"""
with warnings.catch_warnings():
try:
warnings.simplefilter("ignore")
time_start, time_end, energy_start, energy_end = \
np.loadtxt(profiler_hb_log,
dtype=np.dtype('uint64'),
skiprows=1,
usecols=(HB_LOG_IDX_START_TIME,
HB_LOG_IDX_END_TIME,
HB_LOG_IDX_START_ENERGY,
HB_LOG_IDX_END_ENERGY),
unpack=True,
ndmin=1)
except ValueError:
time_start, time_end, energy_start, energy_end = [], [], [], []
name = path.split(profiler_hb_log)[1].split('-')[1].split('.')[0]
return (name,
np.atleast_1d(time_start),
np.atleast_1d(time_end),
np.atleast_1d(energy_start),
np.atleast_1d(energy_end))
def process_trial_dir(trial_dir):
"""Process trial directory.
Return: [(profiler name, [start times], [end times], [start energies], [end energies])]
Time and energy are normalized to 0 start values.
Keyword arguments:
trial_dir -- the directory for this trial
"""
log_data = map(lambda h: read_heartbeat_log(path.join(trial_dir, h)),
filter(lambda f: f.endswith(".log"), os.listdir(trial_dir)))
# Find the earliest timestamps and energy readings
min_t = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [ts for (profiler, ts, te, es, ee) in log_data])))
min_e = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [es for (profiler, ts, te, es, ee) in log_data])))
# Normalize timing/energy data to start values of 0
return [(profiler, ts - min_t, te - min_t, es - min_e, ee - min_e) for (profiler, ts, te, es, ee) in log_data]
def process_config_dir(config_dir):
"""Process a configuration directory.
Return: [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])]
Keyword arguments:
config_dir -- the directory for this configuration - contains subdirectories for each trial
"""
return [(trial_dir, process_trial_dir(path.join(config_dir, trial_dir))) for trial_dir in os.listdir(config_dir)]
def process_logs(log_dir):
"""Process log directory.
Return: [(config, [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])])]
Keyword arguments:
log_dir -- the log directory to process - contains subdirectories for each configuration
"""
return [((config_dir.split('_')[1], process_config_dir(path.join(log_dir, config_dir))))
for config_dir in os.listdir(log_dir)]
def find_best_executions(log_dir):
"""Get the best time, energy, and power from the characterization summaries.
Return: ((config, trial, min_time), (config, trial, min_energy), (config, trial, min_power))
Keyword arguments:
    log_dir -- the log directory containing the per-trial characterization summaries
"""
DEFAULT = ('', '', 1000000000.0)
min_time = DEFAULT
min_energy = DEFAULT
min_power = DEFAULT
for config_dir in os.listdir(log_dir):
for trial_dir in os.listdir(path.join(log_dir, config_dir)):
with open(path.join(log_dir, config_dir, trial_dir, SUMMARY_OUTPUT), "r") as s:
lines = s.readlines()
time = float(lines[SUMMARY_TIME_IDX].split(':')[1])
energy = int(lines[SUMMARY_ENERGY_IDX].split(':')[1])
power = float(lines[SUMMARY_POWER_IDX].split(':')[1])
if time < min_time[2]:
min_time = (config_dir, trial_dir, time)
if energy < min_energy[2]:
min_energy = (config_dir, trial_dir, energy)
                if power < min_power[2]:
min_power = (config_dir, trial_dir, power)
return (min_time, min_energy, min_power)
def main():
"""This script processes the log files from the "characterize.py" script and produces visualizations.
"""
# Default log directory
directory = 'heartbeat_logs'
# Default output directory
output_dir = 'plots'
# Default android
android = False
# Parsing the input of the script
parser = argparse.ArgumentParser(description="Process Heartbeat log files from characterization")
parser.add_argument("-d", "--directory",
default=directory,
help="Heartbeat log directory \"-d heartbeat_logs\"")
parser.add_argument("-o", "--output",
default=output_dir,
help="Specify the log output directory, for example \"-o plots\"")
parser.add_argument("--android",
action="store_true",
dest="android",
default=False,
help="Specify if processing results from Android")
args = parser.parse_args()
if args.directory:
directory = args.directory
if args.output:
output_dir = args.output
if args.android:
android = args.android
if not os.path.exists(directory):
print "Input directory does not exist: " + directory
sys.exit(1)
if os.path.exists(output_dir):
print "Output directory already exists: " + output_dir
sys.exit(1)
res = process_logs(directory)
if not android:
best = find_best_executions(directory)
print 'Best time:', best[0]
print 'Best energy:', best[1]
print 'Best power:', best[2]
os.makedirs(output_dir)
plot_all_raw_totals(res, output_dir)
plot_all_time_series(res, output_dir)
if __name__ == "__main__":
main()
| mpl-2.0 |
apevec/RMS | Utils/SaturationSimulation.py | 2 | 4910 | """ Simulate saturation with a moving Gaussian. """
from __future__ import print_function, division, absolute_import
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize
# Import Cython functions
import pyximport
pyximport.install(setup_args={'include_dirs':[np.get_include()]})
from Utils.SaturationTools import addGaussian
def simulateSaturation(app_mag, mag_app_saturated_input, photom_offset, bg_val, fps, ang_vel, gauss_sigma, \
steps, saturation_point, show_plot=False):
# Compute the log sum pixel
lsp = app_mag - photom_offset
# Compute the intensity sum from the magnitude
intens_sum = 10**(-lsp/2.5)
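    # (Pogson's relation: a difference of 5 magnitudes corresponds to a
    # factor of 100 in intensity.)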
# Compute the border as 3 sigma + padding
border = 3*gauss_sigma + 10
    # If the saturated intensity cannot fit into a (border/2)^2 px window at the saturation level, enlarge the border
intens_sum_saturated = 10**(-(mag_app_saturated_input - photom_offset)/2.5)
if intens_sum_saturated > ((border/2.0)**2)*saturation_point:
border = 4*np.sqrt(intens_sum_saturated/saturation_point)
# Limit the border to 50 px
if border > 50:
border = 50
# Estimate the image size
track_length = int(ang_vel/fps + 2*border)
# Init the frame buffer
frame = np.zeros(shape=(track_length, track_length), dtype=np.float64)
# Init evaluation plane
X = np.linspace(0, track_length - 1, track_length)
Y = np.linspace(0, track_length - 1, track_length)
X, Y = np.meshgrid(X, Y)
# Pack X and Y into a single 3-dimensional array
pos = np.empty(X.shape + (2,))
pos[:, :, 0] = X
pos[:, :, 1] = Y
# Compute the Gaussian amplitude
A = intens_sum/(2*np.pi*gauss_sigma**2)
    # Normalize the amplitude to the number of steps
A /= steps
# Move and evaluate the Gaussian at every step
for n in range(steps):
# Compute the location of the Gaussian
x = y = (n/steps)*(ang_vel/fps) + border
frame = addGaussian(frame, x, y, A, gauss_sigma, 3*border)
# Compute the real magnitude
lsp_unsaturated = -2.5*np.log10(np.sum(frame))
mag_app_unsaturated = lsp_unsaturated + photom_offset
#print('Unsaturated magnitude:', mag_app_unsaturated)
# Add the background
frame += bg_val
# Clip the image
frame = np.clip(frame, 0, saturation_point)
# Compute the saturated magnitude
lsp_saturated = -2.5*np.log10(np.sum(frame - bg_val))
mag_app_saturated = lsp_saturated + photom_offset
#print('Modelled saturated magnitude:', mag_app_saturated)
if show_plot:
plt.imshow(frame, cmap='gray', vmin=0, vmax=255)
plt.show()
return mag_app_unsaturated, mag_app_saturated
def findUnsaturatedMagnitude(app_mag, photom_offset, bg_val, fps, ang_vel, gauss_sigma, steps=50, \
saturation_point=255):
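    # Invert simulateSaturation: search for the true (unsaturated) magnitude
    # whose simulated, clipped track matches the observed apparent magnitude.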
def _costFunc(mag_app_unsaturated, params):
mag_app = params[0]
        # Simulate the saturated magnitude for the candidate unsaturated magnitude
_, mag_app_saturated = simulateSaturation(mag_app_unsaturated, *params)
# Compute the residuals
return abs(mag_app - mag_app_saturated)
    # Numerically find the unsaturated magnitude with a Nelder-Mead search
res = scipy.optimize.minimize(_costFunc, [-1], args=([app_mag, photom_offset, bg_val, fps, ang_vel, \
gauss_sigma, steps, saturation_point]), method='Nelder-Mead')
# ## TEST
# simulateSaturation(res.x[0], app_mag, photom_offset, bg_val, fps, ang_vel, \
# gauss_sigma, steps, saturation_point, show_plot=True)
return res.x[0]
if __name__ == "__main__":
photom_offset = 10.7
background_lvl = 60
fps = 25
ang_vel = 250 # px/s
#print(findUnsaturatedMagnitude(-0.5, photom_offset, 50, 25, 100, 1))
#print(simulateSaturation(-10.0, photom_offset, 50, 25, 100, 1, 100, 255))
# Generate the range of apparent (possibly saturated) magnitudes
app_mag_range = np.append(np.linspace(-0.5, 2, 1000), np.linspace(2, 6, 10))
# Generate a range of gaussian stddevs
gauss_stddevs = [1.5, 1.3, 1.15]
for stddev in gauss_stddevs:
unsaturated_mags = []
for app_mag in app_mag_range:
# Compute the unsaturated magnitude
mag_app_unsaturated = findUnsaturatedMagnitude(app_mag, photom_offset, background_lvl, fps, \
ang_vel, stddev)
print(app_mag, mag_app_unsaturated)
unsaturated_mags.append(mag_app_unsaturated)
plt.plot(unsaturated_mags, app_mag_range, label='$\sigma = {:.2f}$'.format(stddev))
plt.plot(app_mag_range, app_mag_range, color='k', linestyle='--', label='No saturation')
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.axes().set_aspect('equal', 'box')
plt.xlabel('Actual magnitude')
plt.ylabel('Apparent magnitude')
plt.grid()
plt.legend()
plt.tight_layout()
plt.savefig('saturation_sim.png', dpi=300)
plt.show()
| gpl-3.0 |
rs2/pandas | pandas/tests/frame/test_arithmetic.py | 1 | 54436 | from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure df.floordiv(ser, axis=0) matches the column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
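# (editor's note, not part of the original test: pandas only dispatches to
# numexpr once an operand holds at least _MIN_ELEMENTS elements, which is why
# the array built below is sized relative to _MIN_ELEMENTS)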
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# axis=None is a special case, equivalent to the plain operator (asserted below)
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
def test_flex_add_scalar_fill_value(self):
# GH#12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = pd.DataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_frame_equal(res, exp)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
tm.assert_series_equal(result[col], op(df[col], 1))
_test_op(df, operator.add)
_test_op(df, operator.sub)
_test_op(df, operator.mul)
_test_op(df, operator.truediv)
_test_op(df, operator.floordiv)
_test_op(df, operator.pow)
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
_test_op(df, lambda x, y: y ** x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
_test_op(df, lambda x, y: x ** y)
@pytest.mark.parametrize(
"values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]
)
def test_arith_alignment_non_pandas_object(self, values):
# GH#17901
df = pd.DataFrame({"A": [1, 1], "B": [1, 1]})
expected = pd.DataFrame({"A": [2, 2], "B": [3, 3]})
result = df + values
tm.assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = pd.DataFrame(
np.arange(1, 10, dtype="f8").reshape(3, 3),
columns=["one", "two", "three"],
index=["a", "b", "c"],
)
val1 = df.xs("a").values
added = pd.DataFrame(df.values + val1, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val1, added)
added = pd.DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df["two"])
added = pd.DataFrame(df.values + val2, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val2, added)
added = pd.DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val2, axis="index"), added)
val3 = np.random.rand(*df.shape)
added = pd.DataFrame(df.values + val3, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val3), added)
def test_operations_with_interval_categories_index(self, all_arithmetic_operators):
# GH#27415
op = all_arithmetic_operators
ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
data = [1, 2]
df = pd.DataFrame([data], columns=ind)
num = 10
result = getattr(df, op)(num)
expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
tm.assert_frame_equal(result, expected)
def test_frame_with_frame_reindex(self):
# GH#31623
df = pd.DataFrame(
{
"foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],
"bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],
},
columns=["foo", "bar"],
)
df2 = df[["foo"]]
result = df - df2
expected = pd.DataFrame(
{"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},
columns=["bar", "foo"],
)
tm.assert_frame_equal(result, expected)
def test_frame_with_zero_len_series_corner_cases():
# GH#28600
# easy all-float case
df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
ser = pd.Series(dtype=np.float64)
result = df + ser
expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
tm.assert_frame_equal(result, expected)
result = df == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# non-float case should not raise on comparison
df2 = pd.DataFrame(df.values.view("M8[ns]"), columns=df.columns)
result = df2 == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_zero_len_frame_with_series_corner_cases():
# GH#28600
df = pd.DataFrame(columns=["A", "B"], dtype=np.float64)
ser = pd.Series([1, 2], index=["A", "B"])
result = df + ser
expected = df
tm.assert_frame_equal(result, expected)
def test_frame_single_columns_object_sum_axis_1():
# GH 13758
data = {
"One": pd.Series(["A", 1.2, np.nan]),
}
df = pd.DataFrame(data)
result = df.sum(axis=1)
expected = pd.Series(["A", 1.2, 0])
tm.assert_series_equal(result, expected)
# -------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
class TestFrameArithmeticUnsorted:
def test_frame_add_tz_mismatch_converts_to_utc(self):
rng = pd.date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
df = pd.DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"])
df_moscow = df.tz_convert("Europe/Moscow")
result = df + df_moscow
assert result.index.tz is pytz.utc
result = df_moscow + df
assert result.index.tz is pytz.utc
def test_align_frame(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = pd.DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
half = ts[::2]
result = ts + half.take(np.random.permutation(len(half)))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_operators_none_as_na(self, op):
df = DataFrame(
{"col1": [2, 5.0, 123, None], "col2": [1, 2, 3, 4]}, dtype=object
)
# since filling converts dtypes away from object, cast the expected
# result back to object
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
tm.assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize("op,res", [("__eq__", False), ("__ne__", True)])
# TODO: not sure what's correct here.
@pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
def test_logical_typeerror_with_non_valid(self, op, res, float_frame):
# we are comparing floats vs a string
result = getattr(float_frame, op)("foo")
assert bool(result.all().all()) is res
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product(
[list("abc"), ["one", "two", "three"], [1, 2, 3]],
names=["first", "second", "third"],
)
df = DataFrame(
np.arange(27 * 3).reshape(27, 3),
index=index,
columns=["value1", "value2", "value3"],
).sort_index()
idx = pd.IndexSlice
for op in ["add", "sub", "mul", "div", "truediv"]:
opa = getattr(operator, op, None)
if opa is None:
continue
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level="third", axis=0)
expected = pd.concat(
[opa(df.loc[idx[:, :, i], :], v) for i, v in x.items()]
).sort_index()
tm.assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ["two", "three"])
result = getattr(df, op)(x, level="second", axis=0)
expected = (
pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.items()])
.reindex_like(df)
.sort_index()
)
tm.assert_frame_equal(result, expected)
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([["A", "B"], ["a", "b"]])
df = DataFrame(np.ones((2, 4), dtype="int64"), columns=midx)
s = pd.Series({"a": 1, "b": 2})
df2 = df.copy()
df2.columns.names = ["lvl0", "lvl1"]
s2 = s.copy()
s2.index.name = "lvl1"
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level="lvl1")
res6 = df2.mul(s2, axis=1, level="lvl1")
exp = DataFrame(
np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype="int64"), columns=midx
)
for res in [res1, res2]:
tm.assert_frame_equal(res, exp)
exp.columns.names = ["lvl0", "lvl1"]
for res in [res3, res4, res5, res6]:
tm.assert_frame_equal(res, exp)
def test_add_with_dti_mismatched_tzs(self):
base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
idx1 = base.tz_convert("Asia/Tokyo")[:2]
idx2 = base.tz_convert("US/Eastern")[1:]
df1 = DataFrame({"A": [1, 2]}, index=idx1)
df2 = DataFrame({"A": [1, 1]}, index=idx2)
exp = DataFrame({"A": [np.nan, 3, np.nan]}, index=base)
tm.assert_frame_equal(df1 + df2, exp)
def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
frame_copy = float_frame.reindex(float_frame.index[::2])
del frame_copy["D"]
frame_copy["C"][:5] = np.nan
added = float_frame + frame_copy
indexer = added["A"].dropna().index
exp = (float_frame["A"] * 2).copy()
tm.assert_series_equal(added["A"].dropna(), exp.loc[indexer])
exp.loc[~exp.index.isin(indexer)] = np.nan
tm.assert_series_equal(added["A"], exp.loc[added["A"].index])
assert np.isnan(added["C"].reindex(frame_copy.index)[:5]).all()
assert np.isnan(added["D"]).all()
self_added = float_frame + float_frame
tm.assert_index_equal(self_added.index, float_frame.index)
added_rev = frame_copy + float_frame
assert np.isnan(added["D"]).all()
assert np.isnan(added_rev["D"]).all()
# corner cases
# empty
plus_empty = float_frame + DataFrame()
assert np.isnan(plus_empty.values).all()
empty_plus = DataFrame() + float_frame
assert np.isnan(empty_plus.values).all()
empty_empty = DataFrame() + DataFrame()
assert empty_empty.empty
# out of order
reverse = float_frame.reindex(columns=float_frame.columns[::-1])
tm.assert_frame_equal(reverse + float_frame, float_frame * 2)
# mix vs float64, upcast
added = float_frame + mixed_float_frame
_check_mixed_float(added, dtype="float64")
added = mixed_float_frame + float_frame
_check_mixed_float(added, dtype="float64")
# mix vs mix
added = mixed_float_frame + mixed_float_frame
_check_mixed_float(added, dtype=dict(C=None))
# with int
added = float_frame + mixed_int_frame
_check_mixed_float(added, dtype="float64")
def test_combine_series(
self, float_frame, mixed_float_frame, mixed_int_frame, datetime_frame
):
# Series
series = float_frame.xs(float_frame.index[0])
added = float_frame + series
for key, s in added.items():
tm.assert_series_equal(s, float_frame[key] + series[key])
larger_series = series.to_dict()
larger_series["E"] = 1
larger_series = Series(larger_series)
larger_added = float_frame + larger_series
for key, s in float_frame.items():
tm.assert_series_equal(larger_added[key], s + series[key])
assert "E" in larger_added
assert np.isnan(larger_added["E"]).all()
# no upcast needed
added = mixed_float_frame + series
assert np.all(added.dtypes == series.dtype)
# vs mix (upcast) as needed
added = mixed_float_frame + series.astype("float32")
_check_mixed_float(added, dtype=dict(C=None))
added = mixed_float_frame + series.astype("float16")
_check_mixed_float(added, dtype=dict(C=None))
# FIXME: don't leave commented-out
# these raise with numexpr, as we are adding an int64 to a
# uint64 (weird), vs a plain int
# added = mixed_int_frame + (100*series).astype('int64')
# _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
# 'int64', D = 'int64'))
# added = mixed_int_frame + (100*series).astype('int32')
# _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
# 'int32', D = 'int64'))
# TimeSeries
ts = datetime_frame["A"]
# GH 10890: we no longer allow automatic timeseries broadcasting
# and require explicit broadcasting
added = datetime_frame.add(ts, axis="index")
for key, col in datetime_frame.items():
result = col + ts
tm.assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
if col.name == ts.name:
assert result.name == "A"
else:
assert result.name is None
smaller_frame = datetime_frame[:-5]
smaller_added = smaller_frame.add(ts, axis="index")
tm.assert_index_equal(smaller_added.index, datetime_frame.index)
smaller_ts = ts[:-5]
smaller_added2 = datetime_frame.add(smaller_ts, axis="index")
tm.assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
result = datetime_frame.add(ts[:0], axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# Frame is all-nan
result = datetime_frame[:0].add(ts, axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# empty but with non-empty index
frame = datetime_frame[:1].reindex(columns=[])
result = frame.mul(ts, axis="index")
assert len(result) == len(ts)
def test_combineFunc(self, float_frame, mixed_float_frame):
result = float_frame * 2
tm.assert_numpy_array_equal(result.values, float_frame.values * 2)
# vs mix
result = mixed_float_frame * 2
for c, s in result.items():
tm.assert_numpy_array_equal(s.values, mixed_float_frame[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
result = DataFrame() * 2
assert result.index.equals(DataFrame().index)
assert len(result.columns) == 0
def test_comparisons(self, simple_frame, float_frame):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = simple_frame.xs("a")
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
tm.assert_numpy_array_equal(result.values, func(df1.values, df2.values))
msg = (
"Unable to coerce to Series/DataFrame, "
"dimension must be <= 2: (30, 4, 1, 1, 1)"
)
with pytest.raises(ValueError, match=re.escape(msg)):
func(df1, ndim_5)
result2 = func(simple_frame, row)
tm.assert_numpy_array_equal(
result2.values, func(simple_frame.values, row.values)
)
result3 = func(float_frame, 0)
tm.assert_numpy_array_equal(result3.values, func(float_frame.values, 0))
msg = "Can only compare identically-labeled DataFrame"
with pytest.raises(ValueError, match=msg):
func(simple_frame, simple_frame[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_strings_to_numbers_comparisons_raises(self, compare_operators_no_eq_ne):
# GH 11565
df = DataFrame(
{x: {"x": "foo", "y": "bar", "z": "baz"} for x in ["a", "b", "c"]}
)
f = getattr(operator, compare_operators_no_eq_ne)
msg = "'[<>]=?' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
f(df, 0)
def test_comparison_protected_from_errstate(self):
missing_df = tm.makeDataFrame()
missing_df.loc[missing_df.index[0], "A"] = np.nan
with np.errstate(invalid="ignore"):
expected = missing_df.values < 0
with np.errstate(invalid="raise"):
result = (missing_df < 0).values
tm.assert_numpy_array_equal(result, expected)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3, 2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2, 2])
b_c = b_r.T
lst = [2, 2, 2]
tup = tuple(lst)
# gt
expected = DataFrame([[False, False], [False, True], [True, True]])
result = df > b
tm.assert_frame_equal(result, expected)
result = df.values > b
tm.assert_numpy_array_equal(result, expected.values)
msg1d = "Unable to coerce to Series, length must be 2: given 3"
msg2d = "Unable to coerce to DataFrame, shape must be"
msg2db = "operands could not be broadcast together with shapes"
with pytest.raises(ValueError, match=msg1d):
# wrong shape
df > lst
with pytest.raises(ValueError, match=msg1d):
# wrong shape
result = df > tup
# broadcasts like ndarray (GH#23000)
result = df > b_r
tm.assert_frame_equal(result, expected)
result = df.values > b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df > b_c
with pytest.raises(ValueError, match=msg2db):
df.values > b_c
# ==
expected = DataFrame([[False, False], [True, False], [False, False]])
result = df == b
tm.assert_frame_equal(result, expected)
with pytest.raises(ValueError, match=msg1d):
result = df == lst
with pytest.raises(ValueError, match=msg1d):
result = df == tup
# broadcasts like ndarray (GH#23000)
result = df == b_r
tm.assert_frame_equal(result, expected)
result = df.values == b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df == b_c
assert df.values.shape != b_c.shape
# with alignment
df = DataFrame(
np.arange(6).reshape((3, 2)), columns=list("AB"), index=list("abc")
)
expected.index = df.index
expected.columns = df.columns
with pytest.raises(ValueError, match=msg1d):
result = df == lst
with pytest.raises(ValueError, match=msg1d):
result = df == tup
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list("abcdefg")
X_orig = DataFrame(
np.arange(10 * len(columns)).reshape(-1, len(columns)),
columns=columns,
index=range(10),
)
Z = 100 * X_orig.iloc[:, 1:-1].copy()
block1 = list("bedcf")
subs = list("bcdef")
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(np.random.randint(0, 5, size=10).reshape(-1, 5))
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
tm.assert_series_equal(s, s2)
tm.assert_series_equal(s_orig + 1, s)
assert s is s2
assert s._mgr is s2._mgr
df = df_orig.copy()
df2 = df
df += 1
tm.assert_frame_equal(df, df2)
tm.assert_frame_equal(df_orig + 1, df)
assert df is df2
assert df._mgr is df2._mgr
# dtype change
s = s_orig.copy()
s2 = s
s += 1.5
tm.assert_series_equal(s, s2)
tm.assert_series_equal(s_orig + 1.5, s)
df = df_orig.copy()
df2 = df
df += 1.5
tm.assert_frame_equal(df, df2)
tm.assert_frame_equal(df_orig + 1.5, df)
assert df is df2
assert df._mgr is df2._mgr
# mixed dtype
arr = np.random.randint(0, 10, size=5)
df_orig = DataFrame({"A": arr.copy(), "B": "foo"})
df = df_orig.copy()
df2 = df
df["A"] += 1
expected = DataFrame({"A": arr.copy() + 1, "B": "foo"})
tm.assert_frame_equal(df, expected)
tm.assert_frame_equal(df2, expected)
assert df._mgr is df2._mgr
df = df_orig.copy()
df2 = df
df["A"] += 1.5
expected = DataFrame({"A": arr.copy() + 1.5, "B": "foo"})
tm.assert_frame_equal(df, expected)
tm.assert_frame_equal(df2, expected)
assert df._mgr is df2._mgr
@pytest.mark.parametrize(
"op",
[
"add",
"and",
"div",
"floordiv",
"mod",
"mul",
"or",
"pow",
"sub",
"truediv",
"xor",
],
)
def test_inplace_ops_identity2(self, op):
if op == "div":
return
df = DataFrame({"a": [1.0, 2.0, 3.0], "b": [1, 2, 3]})
operand = 2
if op in ("and", "or", "xor"):
# cannot use floats for boolean ops
df["a"] = [True, False, True]
df_copy = df.copy()
iop = f"__i{op}__"
op = f"__{op}__"
# no id change and value is correct
df_id = id(df)
getattr(df, iop)(operand)
expected = getattr(df_copy, op)(operand)
tm.assert_frame_equal(df, expected)
assert id(df) == df_id
def test_alignment_non_pandas(self):
index = ["A", "B", "C"]
columns = ["X", "Y", "Z"]
df = pd.DataFrame(np.random.randn(3, 3), index=index, columns=columns)
align = pd.core.ops.align_method_FRAME
for val in [
[1, 2, 3],
(1, 2, 3),
np.array([1, 2, 3], dtype=np.int64),
range(1, 4),
]:
expected = DataFrame({"X": val, "Y": val, "Z": val}, index=df.index)
tm.assert_frame_equal(align(df, val, "index")[1], expected)
expected = DataFrame(
{"X": [1, 1, 1], "Y": [2, 2, 2], "Z": [3, 3, 3]}, index=df.index
)
tm.assert_frame_equal(align(df, val, "columns")[1], expected)
# length mismatch
msg = "Unable to coerce to Series, length must be 3: given 2"
for val in [[1, 2], (1, 2), np.array([1, 2]), range(1, 3)]:
with pytest.raises(ValueError, match=msg):
align(df, val, "index")
with pytest.raises(ValueError, match=msg):
align(df, val, "columns")
val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(
align(df, val, "index")[1],
DataFrame(val, index=df.index, columns=df.columns),
)
tm.assert_frame_equal(
align(df, val, "columns")[1],
DataFrame(val, index=df.index, columns=df.columns),
)
# shape mismatch
msg = "Unable to coerce to DataFrame, shape must be"
val = np.array([[1, 2, 3], [4, 5, 6]])
with pytest.raises(ValueError, match=msg):
align(df, val, "index")
with pytest.raises(ValueError, match=msg):
align(df, val, "columns")
val = np.zeros((3, 3, 3))
msg = re.escape(
"Unable to coerce to Series/DataFrame, dimension must be <= 2: (3, 3, 3)"
)
with pytest.raises(ValueError, match=msg):
align(df, val, "index")
with pytest.raises(ValueError, match=msg):
align(df, val, "columns")
def test_no_warning(self, all_arithmetic_operators):
df = pd.DataFrame({"A": [0.0, 0.0], "B": [0.0, None]})
b = df["B"]
with tm.assert_produces_warning(None):
getattr(df, all_arithmetic_operators)(b, 0)
def test_pow_with_realignment():
# GH#32685 pow has special semantics for operating with null values
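# (editor's note: after realignment `right` is all-NaN; 1 ** NaN evaluates to
# 1.0 under IEEE pow, while 0 ** NaN and 2 ** NaN remain NaN, which is exactly
# what the expected frame below encodes)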
left = pd.DataFrame({"A": [0, 1, 2]})
right = pd.DataFrame(index=[0, 1, 2])
result = left ** right
expected = pd.DataFrame({"A": [np.nan, 1.0, np.nan]})
tm.assert_frame_equal(result, expected)
# TODO: move to tests.arithmetic and parametrize
def test_pow_nan_with_zero():
left = pd.DataFrame({"A": [np.nan, np.nan, np.nan]})
right = pd.DataFrame({"A": [0, 0, 0]})
expected = pd.DataFrame({"A": [1.0, 1.0, 1.0]})
result = left ** right
tm.assert_frame_equal(result, expected)
result = left["A"] ** right["A"]
tm.assert_series_equal(result, expected["A"])
def test_dataframe_series_extension_dtypes():
# https://github.com/pandas-dev/pandas/issues/34311
df = pd.DataFrame(np.random.randint(0, 100, (10, 3)), columns=["a", "b", "c"])
ser = pd.Series([1, 2, 3], index=["a", "b", "c"])
expected = df.to_numpy("int64") + ser.to_numpy("int64").reshape(-1, 3)
expected = pd.DataFrame(expected, columns=df.columns, dtype="Int64")
df_ea = df.astype("Int64")
result = df_ea + ser
tm.assert_frame_equal(result, expected)
result = df_ea + ser.astype("Int64")
tm.assert_frame_equal(result, expected)
def test_dataframe_blockwise_slicelike():
# GH#34367
arr = np.random.randint(0, 1000, (100, 10))
df1 = pd.DataFrame(arr)
df2 = df1.copy()
df2.iloc[0, [1, 3, 7]] = np.nan
df3 = df1.copy()
df3.iloc[0, [5]] = np.nan
df4 = df1.copy()
df4.iloc[0, np.arange(2, 5)] = np.nan
df5 = df1.copy()
df5.iloc[0, np.arange(4, 7)] = np.nan
for left, right in [(df1, df2), (df2, df3), (df4, df5)]:
res = left + right
expected = pd.DataFrame({i: left[i] + right[i] for i in left.columns})
tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize(
"df, col_dtype",
[
(pd.DataFrame([[1.0, 2.0], [4.0, 5.0]], columns=list("ab")), "float64"),
(pd.DataFrame([[1.0, "b"], [4.0, "b"]], columns=list("ab")), "object"),
],
)
def test_dataframe_operation_with_non_numeric_types(df, col_dtype):
# GH #22663
expected = pd.DataFrame([[0.0, np.nan], [3.0, np.nan]], columns=list("ab"))
expected = expected.astype({"b": col_dtype})
result = df + pd.Series([-1.0], index=list("a"))
tm.assert_frame_equal(result, expected)
def test_arith_reindex_with_duplicates():
# https://github.com/pandas-dev/pandas/issues/35194
df1 = pd.DataFrame(data=[[0]], columns=["second"])
df2 = pd.DataFrame(data=[[0, 0, 0]], columns=["first", "second", "second"])
result = df1 + df2
expected = pd.DataFrame([[np.nan, 0, 0]], columns=["first", "second", "second"])
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
fzalkow/scikit-learn | sklearn/metrics/metrics.py | 233 | 1262 | import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
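# Migration sketch (editor's note, not part of the original shim): imports such
# as
#   from sklearn.metrics.metrics import roc_auc_score
# should become
#   from sklearn.metrics import roc_auc_score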
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| bsd-3-clause |
keans/dstools | dstools/roc.py | 1 | 4089 | import logging
import ujson as json
import numpy as np
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
from matplotlib import ticker
from .plot import FigureContext
# disable extensive matplotlib logging
logging.getLogger("matplotlib").setLevel(logging.WARNING)
# logger
log = logging.getLogger(__name__)
class RocCurve:
"""
class that abstracts some of the logic of the
Receiver Operating Characteristics (ROC) curve
"""
def __init__(self, labels=None, scores=None, name="ROC"):
self._name = name
if (labels is not None) and (scores is not None):
# compute roc curve, if provided
assert(len(labels) == len(scores))
self.compute_roc_curve(labels, scores)
def compute_roc_curve(self, labels, scores):
"""
compute the ROC curve for given labels and scores
"""
self._fpr, self._tpr, self._thresholds = roc_curve(labels, scores)
@property
def dict(self):
return {
"name": self._name,
"fpr": self._fpr,
"tpr": self._tpr,
}
def set_name(self, name):
"""
set new name
"""
self._name = name
def auc(self, max_fpr=1.0):
"""
compute (bounded-)AUC for ROC curve
"""
return self._bounded_auc(self._fpr, self._tpr, max_fpr)
def _bounded_auc(self, fpr, tpr, max_fpr=None):
"""
compute the bounded AUC of a ROC curve on given
false positive rate and true positive rate
"""
if (max_fpr is None) or (max_fpr == 1.0):
# just return full auc i.e. without modifications
return fpr, tpr, auc(fpr, tpr)
# compute bounded auc
stop = np.searchsorted(fpr, max_fpr, "right")
x_interp = [fpr[stop - 1], fpr[stop]]
y_interp = [tpr[stop - 1], tpr[stop]]
tpr = np.append(tpr[:stop], np.interp(max_fpr, x_interp, y_interp))
fpr = np.append(fpr[:stop], max_fpr)
# McClish correction: standardize the result to be 0.5 if
# non-discriminant and 1.0 if maximal
min_area = 0.5 * max_fpr ** 2
auc_ = 0.5 * (1 + (auc(fpr, tpr) - min_area) / (max_fpr - min_area))
return fpr, tpr, auc_
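# Worked example for the McClish correction above (editor's illustration, not
# part of the original module): with max_fpr = 0.2 the chance-level area is
# min_area = 0.5 * 0.2 ** 2 = 0.02 and the maximal area is max_fpr = 0.2, so a
# raw partial AUC of 0.11 standardizes to
#   0.5 * (1 + (0.11 - 0.02) / (0.2 - 0.02)) = 0.75,
# i.e. halfway between chance (0.5) and perfect (1.0).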
def plot(self, ax, max_fpr=1.0, title=None, random_line=True):
# plot ROC curve
ax.plot(self._fpr, self._tpr, label=self._name)
# plot random predictions line
if random_line is True:
plt.plot([0, 1], [0, 1], "k--")
ax.xaxis.set_major_locator(ticker.MaxNLocator(prune='lower'))
plt.xlim([0.0, max_fpr or 1.0])
plt.ylim([0.0, 1.0])
if title is not None:
plt.title(title)
plt.legend(loc="lower right")
def save(self, filename):
"""
save given ROC curve as .json file
"""
log.debug("saving ROC curve to '{}'...".format(filename))
with open(filename, "w") as f:
json.dump(self.dict, f)
def load(self, filename):
"""
load a ROC curve from a .json file
"""
log.debug("loading ROC curve from '{}'...".format(filename))
with open(filename, "r") as f:
d = json.load(f)
self._fpr = d["fpr"]
self._tpr = d["tpr"]
self._name = d["name"]
def plot_rocs(
rocs, targetdir, filename="roc", max_fpr=None, random_line=True,
title=None, figsize=(10, 8), ext="pdf"
):
"""
plot multiple ROC curves into one figure
`rocs` may be a single RocCurve instance or a list of RocCurve
instances; each curve is drawn via its own `plot` method
"""
if isinstance(rocs, RocCurve):
# convert a single RocCurve into a list
rocs = [rocs]
with FigureContext(
targetdir=targetdir, filename=filename, title=title,
xlabel="False Positive Rate", ylabel="True Positive Rate",
rotate_xticks=False, ext=ext, figsize=figsize
) as ax:
for roc in rocs:
roc.plot(ax, max_fpr, title, random_line)
return plt.gcf()
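# Minimal usage sketch (editor's note; the labels, scores and paths below are
# invented):
#
#   labels = [0, 0, 1, 1]
#   scores = [0.1, 0.4, 0.35, 0.8]
#   roc = RocCurve(labels, scores, name="example model")
#   _, _, auc_score = roc.auc(max_fpr=0.5)
#   plot_rocs(roc, targetdir=".", filename="example_roc", max_fpr=0.5)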
| mit |
amacd31/dice-counter | run.py | 1 | 3868 | """
Copyright (C) 2014 Andrew MacDonald
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import argparse
import datetime
import json
from matplotlib import pyplot as plt
import numpy as np
class DieCounter(object):
def __init__(self, savefile):
self.savefile = savefile
fig=plt.figure()
self.ax1 = plt.subplot(2,1,1)
self.ax2 = plt.subplot(2,2,3)
self.ax3 = plt.subplot(2,2,4)
self.ax1.set_xlim([2,13])
self.ax2.set_xlim([1,7])
self.ax3.set_xlim([1,7])
plt.ion()
plt.show()
def get_ans(self):
answer = input().lower()
if answer == 'quit':
print('You quit!')
self.save()
exit()
elif answer == 'save':
self.save()
elif answer == 'undo':
print("Removed roll %d, %d" % (self.d1_list.pop(), self.d2_list.pop()))
print("Last roll now %d, %d" % (self.d1_list[-1], self.d2_list[-1]))
try:
answer = int(answer)
except ValueError:
print("%s was not a number" % answer)
return self.get_ans()
if answer in [1, 2, 3, 4, 5, 6]:
return answer
else:
print("%d not between 1 and 6" % answer)
return self.get_ans()
def save(self):
print('Saving to %s...' % self.savefile)
with open(self.savefile, 'w') as f:
json.dump({'d1': self.d1_list, 'd2': self.d2_list}, f)
print('Saved.')
def update_plot(self):
bins = np.arange(2, 14, 1)
sub_plot_bins = np.arange(1, 8, 1)
self.ax1.cla()
self.ax2.cla()
self.ax3.cla()
self.ax1.set_xlim([1.5,12.5])
self.ax2.set_xlim([0.5,6.5])
self.ax3.set_xlim([0.5,6.5])
self.ax1.hist(np.array(self.d1_list) + np.array(self.d2_list), bins=bins, range=[1,13], facecolor='g', align='left')
self.ax1.set_xticks(bins[:-1])
self.ax2.hist(np.array(self.d1_list), bins=sub_plot_bins, range=[1,7], facecolor='g', align='left')
self.ax2.set_xticks(sub_plot_bins[:-1])
self.ax3.hist(np.array(self.d2_list), bins=sub_plot_bins, range=[1,7], facecolor='g', align='left')
self.ax3.set_xticks(sub_plot_bins[:-1])
plt.draw()
def start(self):
self.d1_list = []
self.d2_list = []
print "Die 1:"
d1 = counter.get_ans()
print "Die 2:"
d2 = counter.get_ans()
while(1):
self.d1_list.append(d1)
self.d2_list.append(d2)
print "Last roll:", self.d1_list[-1], self.d2_list[-1]
counter.update_plot()
print "Die 1:"
d1 = counter.get_ans()
print "Die 2:"
d2 = counter.get_ans()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Counts dice rolls.')
parser.add_argument('savefile', metavar='FILE', type=str, nargs='?', default=datetime.datetime.now().strftime('%Y%m%d_%H%M_rolls.json'),
help='File to save results to when quitting. Default: YYYYMMDD_HHMM_rolls.json')
args = parser.parse_args()
counter = DieCounter(args.savefile)
counter.start()
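# Usage sketch (editor's note): run `python run.py my_rolls.json` and type each
# die value (1-6) followed by Enter; 'undo' removes the last roll, 'save'
# writes the rolls recorded so far, and 'quit' saves and exits.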
| gpl-2.0 |
irhete/predictive-monitoring-benchmark | experiments/experiments_knn.py | 1 | 11024 | import EncoderFactory
from DatasetManager import DatasetManager
import BucketFactory
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors
import time
import os
import sys
from sys import argv
import pickle
from collections import defaultdict
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
dataset_ref = argv[1]
params_dir = argv[2]
results_dir = argv[3]
bucket_method = argv[4]
cls_encoding = argv[5]
cls_method = argv[6]
gap = int(argv[7])
n_iter = int(argv[8])
if bucket_method == "state":
bucket_encoding = "last"
else:
bucket_encoding = "agg"
method_name = "%s_%s"%(bucket_method, cls_encoding)
dataset_ref_to_datasets = {
"bpic2011": ["bpic2011_f%s"%formula for formula in range(1,5)],
"bpic2015": ["bpic2015_%s_f2"%(municipality) for municipality in range(1,6)],
"insurance": ["insurance_activity", "insurance_followup"],
"sepsis_cases": ["sepsis_cases_1", "sepsis_cases_2", "sepsis_cases_4"]
}
encoding_dict = {
"laststate": ["static", "last"],
"agg": ["static", "agg"],
"index": ["static", "index"],
"combined": ["static", "last", "agg"]
}
datasets = [dataset_ref] if dataset_ref not in dataset_ref_to_datasets else dataset_ref_to_datasets[dataset_ref]
methods = encoding_dict[cls_encoding]
train_ratio = 0.8
random_state = 22
# create the params and results directories if they do not exist
if not os.path.exists(params_dir):
os.makedirs(params_dir)
if not os.path.exists(results_dir):
os.makedirs(results_dir)
for dataset_name in datasets:
# load optimal params
optimal_params_filename = os.path.join(params_dir, "optimal_params_%s_%s_%s.pickle" % (cls_method, dataset_name, method_name))
if not os.path.isfile(optimal_params_filename) or os.path.getsize(optimal_params_filename) <= 0:
continue
with open(optimal_params_filename, "rb") as fin:
args = pickle.load(fin)
# read the data
dataset_manager = DatasetManager(dataset_name)
data = dataset_manager.read_dataset()
cls_encoder_args = {'case_id_col': dataset_manager.case_id_col,
'static_cat_cols': dataset_manager.static_cat_cols,
'static_num_cols': dataset_manager.static_num_cols,
'dynamic_cat_cols': dataset_manager.dynamic_cat_cols,
'dynamic_num_cols': dataset_manager.dynamic_num_cols,
'fillna': True}
# determine min and max (truncated) prefix lengths
min_prefix_length = 1
if "traffic_fines" in dataset_name:
max_prefix_length = 10
elif "bpic2017" in dataset_name:
max_prefix_length = min(20, dataset_manager.get_pos_case_length_quantile(data, 0.90))
else:
max_prefix_length = min(40, dataset_manager.get_pos_case_length_quantile(data, 0.90))
# split into training and test
train, test = dataset_manager.split_data_strict(data, train_ratio, split="temporal")
if gap > 1:
outfile = os.path.join(results_dir, "performance_results_%s_%s_%s_gap%s.csv" % (cls_method, dataset_name, method_name, gap))
else:
outfile = os.path.join(results_dir, "performance_results_%s_%s_%s.csv" % (cls_method, dataset_name, method_name))
start_test_prefix_generation = time.time()
dt_test_prefixes = dataset_manager.generate_prefix_data(test, min_prefix_length, max_prefix_length)
test_prefix_generation_time = time.time() - start_test_prefix_generation
offline_total_times = []
online_event_times = []
train_prefix_generation_times = []
for ii in range(n_iter):
# create prefix logs
start_train_prefix_generation = time.time()
dt_train_prefixes = dataset_manager.generate_prefix_data(train, min_prefix_length, max_prefix_length, gap)
train_prefix_generation_time = time.time() - start_train_prefix_generation
train_prefix_generation_times.append(train_prefix_generation_time)
# Bucketing prefixes based on control flow
knn_encoder_args = {'case_id_col':dataset_manager.case_id_col,
'static_cat_cols':[],
'static_num_cols':[],
'dynamic_cat_cols':[dataset_manager.activity_col],
'dynamic_num_cols':[],
'fillna':True}
# initiate the KNN model
start_offline_time_bucket = time.time()
bucket_encoder = EncoderFactory.get_encoder(bucket_encoding, **knn_encoder_args)
encoded_train = bucket_encoder.fit_transform(dt_train_prefixes)
if "n_neighbors" in args:
n_neighbors = int(args["n_neighbors"])
else:
n_neighbors = 50
bucketer = NearestNeighbors(n_neighbors=n_neighbors, algorithm='auto').fit(encoded_train)
offline_time_bucket = time.time() - start_offline_time_bucket
preds_all = []
test_y_all = []
nr_events_all = []
offline_time_fit = 0
current_online_event_times = []
for _, dt_test_bucket in dt_test_prefixes.groupby(dataset_manager.case_id_col):
# select current test case
test_y_all.extend(dataset_manager.get_label_numeric(dt_test_bucket))
nr_events_all.append(len(dt_test_bucket))
start = time.time()
encoded_case = bucket_encoder.fit_transform(dt_test_bucket)
_, knn_idxs = bucketer.kneighbors(encoded_case)
knn_idxs = knn_idxs[0]
relevant_cases_bucket = encoded_train.iloc[knn_idxs].index
dt_train_bucket = dataset_manager.get_relevant_data_by_indexes(dt_train_prefixes, relevant_cases_bucket) # one row per event
train_y = dataset_manager.get_label_numeric(dt_train_bucket)
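# editor's note: if every retrieved neighbor carries the same label, skip
# training a classifier and use that label (already encoded as 0/1) directly
# as the prediction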
if len(set(train_y)) < 2:
preds_all.append(train_y[0])
else:
feature_combiner = FeatureUnion([(method, EncoderFactory.get_encoder(method, **cls_encoder_args)) for method in methods])
if cls_method == "rf":
cls = RandomForestClassifier(n_estimators=500,
max_features=args['max_features'],
random_state=random_state)
elif cls_method == "xgboost":
cls = xgb.XGBClassifier(objective='binary:logistic',
n_estimators=500,
learning_rate= args['learning_rate'],
subsample=args['subsample'],
max_depth=int(args['max_depth']),
colsample_bytree=args['colsample_bytree'],
min_child_weight=int(args['min_child_weight']),
seed=random_state)
elif cls_method == "logit":
cls = LogisticRegression(C=2**args['C'],
random_state=random_state)
elif cls_method == "svm":
cls = SVC(C=2**args['C'],
gamma=2**args['gamma'],
random_state=random_state)
if cls_method == "svm" or cls_method == "logit":
pipeline = Pipeline([('encoder', feature_combiner), ('scaler', StandardScaler()), ('cls', cls)])
else:
pipeline = Pipeline([('encoder', feature_combiner), ('cls', cls)])
pipeline.fit(dt_train_bucket, train_y)
if cls_method == "svm":
preds = pipeline.decision_function(dt_test_bucket)
else:
preds_pos_label_idx = np.where(cls.classes_ == 1)[0][0]
preds = pipeline.predict_proba(dt_test_bucket)[:,preds_pos_label_idx]
preds_all.extend(preds)
pipeline_pred_time = time.time() - start
current_online_event_times.append(pipeline_pred_time / len(dt_test_bucket))
offline_total_time = offline_time_bucket + train_prefix_generation_time
offline_total_times.append(offline_total_time)
online_event_times.append(current_online_event_times)
with open(outfile, 'w') as fout:
fout.write("%s;%s;%s;%s;%s;%s;%s\n"%("dataset", "method", "cls", "nr_events", "n_iter", "metric", "score"))
fout.write("%s;%s;%s;%s;%s;%s;%s\n"%(dataset_name, method_name, cls_method, -1, -1, "test_prefix_generation_time", test_prefix_generation_time))
for ii in range(len(offline_total_times)):
fout.write("%s;%s;%s;%s;%s;%s;%s\n"%(dataset_name, method_name, cls_method, -1, ii, "train_prefix_generation_time", train_prefix_generation_times[ii]))
fout.write("%s;%s;%s;%s;%s;%s;%s\n"%(dataset_name, method_name, cls_method, -1, ii, "offline_time_total", offline_total_times[ii]))
fout.write("%s;%s;%s;%s;%s;%s;%s\n"%(dataset_name, method_name, cls_method, -1, ii, "online_time_avg", np.mean(online_event_times[ii])))
fout.write("%s;%s;%s;%s;%s;%s;%s\n"%(dataset_name, method_name, cls_method, -1, ii, "online_time_std", np.std(online_event_times[ii])))
dt_results = pd.DataFrame({"actual": test_y_all, "predicted": preds_all, "nr_events": nr_events_all})
for nr_events, group in dt_results.groupby("nr_events"):
if len(set(group.actual)) < 2:
fout.write("%s;%s;%s;%s;%s;%s;%s\n"%(dataset_name, method_name, cls_method, nr_events, -1, "auc", np.nan))
else:
fout.write("%s;%s;%s;%s;%s;%s;%s\n"%(dataset_name, method_name, cls_method, nr_events, -1, "auc", roc_auc_score(group.actual, group.predicted)))
fout.write("%s;%s;%s;%s;%s;%s;%s\n"%(dataset_name, method_name, cls_method, -1, -1, "auc", roc_auc_score(dt_results.actual, dt_results.predicted)))
online_event_times_flat = [t for iter_online_event_times in online_event_times for t in iter_online_event_times]
fout.write("%s;%s;%s;%s;%s;%s;%s\n"%(dataset_name, method_name, cls_method, -1, -1, "online_time_avg", np.mean(online_event_times_flat)))
fout.write("%s;%s;%s;%s;%s;%s;%s\n"%(dataset_name, method_name, cls_method, -1, -1, "online_time_std", np.std(online_event_times_flat)))
fout.write("%s;%s;%s;%s;%s;%s;%s\n"%(dataset_name, method_name, cls_method, -1, -1, "offline_time_total_avg", np.mean(offline_total_times)))
fout.write("%s;%s;%s;%s;%s;%s;%s\n"%(dataset_name, method_name, cls_method, -1, -1, "offline_time_total_std", np.std(offline_total_times))) | apache-2.0 |
zycdragonball/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/ops.py | 77 | 46403 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core ops for LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import types
import numpy as np
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.training import input # pylint: disable=redefined-builtin
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis,
tc.Optional(string_types))
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
temp_axes = core.Axes([axis] + list(
labeled_tensor.axes.remove(axis.name).values()))
transposed = core.transpose(labeled_tensor, temp_axes.keys())
indexed = core.LabeledTensor(
array_ops.gather(transposed.tensor, indexer), temp_axes)
return core.transpose(indexed, labeled_tensor.axes.keys(), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types,
tc.Union(slice, collections.Hashable, list)),
tc.Optional(string_types))
def select(labeled_tensor, selection, name=None):
"""Slice out a subset of the tensor.
Args:
labeled_tensor: The input tensor.
selection: A dictionary mapping an axis name to a scalar, slice or list of
values to select. Currently supports two types of selections:
(a) Any number of scalar and/or slice selections.
(b) Exactly one list selection, without any scalars or slices.
name: Optional op name.
Returns:
The selection as a `LabeledTensor`.
Raises:
ValueError: If the tensor doesn't have an axis in the selection or if
that axis lacks labels.
KeyError: If any labels in a selection are not found in the original axis.
NotImplementedError: If you attempt to combine a list selection with
scalar selection or another list selection.
"""
with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
slices = {}
indexers = {}
for axis_name, value in selection.items():
if axis_name not in labeled_tensor.axes:
raise ValueError(
'The tensor does not have an axis named %s. Its axes are: %r' %
(axis_name, labeled_tensor.axes.keys()))
axis = labeled_tensor.axes[axis_name]
if axis.labels is None:
raise ValueError(
'The axis named %s does not have labels. The axis is: %r' %
(axis_name, axis))
if isinstance(value, slice):
# TODO(shoyer): consider deprecating using slices in favor of lists
if value.start is None:
start = None
else:
start = axis.index(value.start)
if value.stop is None:
stop = None
else:
# For now, follow the pandas convention of making labeled slices
# inclusive of both bounds.
stop = axis.index(value.stop) + 1
if value.step is not None:
raise NotImplementedError('slicing with a step is not yet supported')
slices[axis_name] = slice(start, stop)
# Needs to be after checking for slices, since slice objects claim to be
# instances of collections.Hashable but hash() on them fails.
elif isinstance(value, collections.Hashable):
slices[axis_name] = axis.index(value)
elif isinstance(value, list):
if indexers:
raise NotImplementedError(
'select does not yet support more than one list selection at '
'the same time')
indexer = [axis.index(v) for v in value]
indexers[axis_name] = ops.convert_to_tensor(indexer, dtype=dtypes.int64)
else:
# If type checking is working properly, this shouldn't be possible.
raise TypeError('cannot handle arbitrary types')
if indexers and slices:
raise NotImplementedError(
'select does not yet support combined scalar and list selection')
# For now, handle array selection separately, because tf.gather_nd does
# not support gradients yet. Later, using gather_nd will let us combine
# these paths.
if indexers:
(axis_name, indexer), = indexers.items()
axis = core.Axis(axis_name, selection[axis_name])
return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope)
else:
return core.slice_function(labeled_tensor, slices, name=scope)
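# Illustrative usage (editor's note, not part of the original module; the axis
# names and labels are invented):
#
#   lt = core.LabeledTensor(array_ops.ones((3, 2)),
#                           [('x', ['a', 'b', 'c']), ('y', [0, 1])])
#   select(lt, {'x': 'a'})              # scalar selection
#   select(lt, {'x': slice('a', 'b')})  # labeled slice, inclusive of 'b'
#   select(lt, {'x': ['a', 'c']})       # list selection (at most one per call)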
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike), string_types,
tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
"""Concatenate tensors along a dimension.
See tf.concat.
Args:
labeled_tensors: A list of input LabeledTensors.
axis_name: The name of the axis along which to concatenate.
name: Optional op name.
Returns:
The concatenated tensor.
The coordinate labels for the concatenation dimension are also concatenated,
if they are available for every tensor.
Raises:
    ValueError: If fewer than one input tensor is provided, if the tensors
      have incompatible axes, or if `axis_name` isn't the name of an axis.
"""
with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
raise ValueError('concat expects at least 1 tensor, but received %s' %
labeled_tensors)
# All tensors must have these axes.
axes_0 = labeled_tensors[0].axes
axis_names = list(axes_0.keys())
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
shared_axes = axes_0.remove(axis_name)
tensors = [labeled_tensors[0].tensor]
concat_axis_list = [axes_0[axis_name]]
for labeled_tensor in labeled_tensors[1:]:
current_shared_axes = labeled_tensor.axes.remove(axis_name)
if current_shared_axes != shared_axes:
# TODO(shoyer): add more specific checks about what went wrong,
# including raising AxisOrderError when appropriate
raise ValueError('Mismatched shared axes: the first tensor '
'had axes %r but this tensor has axes %r.' %
(shared_axes, current_shared_axes))
# Accumulate the axis labels, if they're available.
concat_axis_list.append(labeled_tensor.axes[axis_name])
tensors.append(labeled_tensor.tensor)
concat_axis = core.concat_axes(concat_axis_list)
concat_dimension = axis_names.index(axis_name)
concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
values = list(axes_0.values())
concat_axes = (values[:concat_dimension] + [concat_axis] +
values[concat_dimension + 1:])
return core.LabeledTensor(concat_tensor, concat_axes)
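# Illustrative usage sketch (not part of the original module): concatenating
# two LabeledTensors along a shared axis name. `a` and `b` are assumed to have
# identical axes except along 'time'; the axis name is hypothetical.
def _concat_usage_sketch(a, b):
  # The labels along 'time' in the result are the concatenation of the inputs'
  # labels, when every input provides them.
  return concat([a, b], 'time')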
# TODO(shoyer): rename pack/unpack to stack/unstack
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike),
tc.Union(string_types, core.AxisLike), int, tc.Optional(string_types))
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
"""Pack tensors along a new axis.
See tf.pack.
Args:
labeled_tensors: The input tensors, which must have identical axes.
new_axis: The name of the new axis, or a tuple containing the name
and coordinate labels.
axis_position: Optional integer position at which to insert the new axis.
name: Optional op name.
Returns:
The packed tensors as a single LabeledTensor, with `new_axis` in the given
`axis_position`.
Raises:
    ValueError: If fewer than one input tensor is provided, or if the tensors
don't have identical axes.
"""
with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
      raise ValueError('pack expects at least 1 tensor, but received %s' %
labeled_tensors)
axes_0 = labeled_tensors[0].axes
for t in labeled_tensors:
if t.axes != axes_0:
raise ValueError('Non-identical axes. Expected %s but got %s' %
(axes_0, t.axes))
pack_op = array_ops.stack(
[t.tensor for t in labeled_tensors], axis=axis_position, name=scope)
axes = list(axes_0.values())
axes.insert(axis_position, new_axis)
return core.LabeledTensor(pack_op, axes)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike,
tc.Optional(string_types), tc.Optional(string_types))
def unpack(labeled_tensor, axis_name=None, name=None):
"""Unpack the tensor.
See tf.unpack.
Args:
labeled_tensor: The input tensor.
axis_name: Optional name of axis to unpack. By default, the first axis is
used.
name: Optional op name.
Returns:
The list of unpacked LabeledTensors.
Raises:
ValueError: If `axis_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
axis_names = list(labeled_tensor.axes.keys())
if axis_name is None:
axis_name = axis_names[0]
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
axis = axis_names.index(axis_name)
unpack_ops = array_ops.unstack(labeled_tensor.tensor, axis=axis, name=scope)
axes = [a for i, a in enumerate(labeled_tensor.axes.values()) if i != axis]
return [core.LabeledTensor(t, axes) for t in unpack_ops]
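# Illustrative usage sketch (not part of the original module): a pack/unpack
# round trip. `lt` is assumed to have a first axis named 'batch'; the axis
# name is hypothetical. Note that labels on 'batch', if any, are not restored
# when re-packing with just the axis name.
def _pack_unpack_usage_sketch(lt):
  pieces = unpack(lt, 'batch')      # list of LabeledTensors without the 'batch' axis
  restored = pack(pieces, 'batch')  # re-inserts a 'batch' axis at position 0
  return restored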
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Collection(string_types),
tc.Collection(tc.Union(string_types, core.AxisLike)),
tc.Optional(string_types))
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
"""Reshape specific axes of a LabeledTensor.
Non-indicated axes remain in their original locations.
Args:
labeled_tensor: The input tensor.
existing_axes: List of axis names found on the input tensor. These must
appear sequentially in the list of axis names on the input. In other
words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
providing new axes with which to replace `existing_axes` in the reshaped
result. At most one element of `new_axes` may be a string, indicating an
axis with unknown size.
name: Optional op name.
Returns:
The reshaped LabeledTensor.
Raises:
ValueError: If `existing_axes` are not all axes on the input, or if more
than one of `new_axes` has unknown size.
AxisOrderError: If `existing_axes` are not a slice of axis names on the
input.
"""
with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
original_axis_names = list(labeled_tensor.axes.keys())
existing_axes = list(existing_axes)
if not set(existing_axes) <= set(original_axis_names):
raise ValueError('existing_axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_axes, original_axis_names))
start = original_axis_names.index(existing_axes[0])
stop = original_axis_names.index(existing_axes[-1]) + 1
if existing_axes != original_axis_names[start:stop]:
# We could support existing_axes that aren't a slice by using transpose,
# but that could lead to unpredictable performance consequences because
# transposes are not free in TensorFlow. If we did transpose
# automatically, the user might never realize that their data is being
      # produced with the wrong order. (The latter will occur with some
      # frequency because of how broadcasting automatically chooses axis order.)
# So for now we've taken the strict approach.
raise core.AxisOrderError(
'existing_axes %r are not a slice of axis names %r on the input '
'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
'axes on the input explicitly.' %
(existing_axes, original_axis_names))
if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
raise ValueError(
'at most one axis in new_axes can have unknown size. All other '
'axes must have an indicated integer size or labels: %r' % new_axes)
original_values = list(labeled_tensor.axes.values())
axis_size = lambda axis: -1 if axis.size is None else axis.size
shape = [axis_size(axis) for axis in original_values[:start]]
for axis_ref in new_axes:
if isinstance(axis_ref, string_types):
shape.append(-1)
else:
axis = core.as_axis(axis_ref)
shape.append(axis_size(axis))
shape.extend(axis_size(axis) for axis in original_values[stop:])
reshaped_tensor = array_ops.reshape(
labeled_tensor.tensor, shape, name=scope)
axes = original_values[:start] + list(new_axes) + original_values[stop:]
return core.LabeledTensor(reshaped_tensor, axes)
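# Illustrative usage sketch (not part of the original module): flattening two
# adjacent axes into one and then restoring them. Assumes `lt` has consecutive
# axes ('row', 'col') of sizes 4 and 5; the names and sizes are hypothetical.
def _reshape_usage_sketch(lt):
  flat = reshape(lt, ['row', 'col'], [('pixel', 20)])
  restored = reshape(flat, ['pixel'], [('row', 4), ('col', 5)])
  return flat, restored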
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, string_types,
tc.Optional(string_types))
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
"""Rename an axis of LabeledTensor.
Args:
labeled_tensor: The input tensor.
existing_name: Name for an existing axis on the input.
new_name: Desired replacement name.
name: Optional op name.
Returns:
LabeledTensor with renamed axis.
Raises:
ValueError: If `existing_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
if existing_name not in labeled_tensor.axes:
      raise ValueError('existing_name %r is not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_name, labeled_tensor.axes.keys()))
new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
return reshape(labeled_tensor, [existing_name], [new_axis], name=scope)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(string_types, collections.Callable, int, bool,
tc.Collection(core.LabeledTensorLike), bool,
tc.Optional(string_types))
def _batch_helper(default_name,
batch_fn,
batch_size,
enqueue_many,
labeled_tensors,
allow_smaller_final_batch,
name=None):
with ops.name_scope(name, default_name, labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
# TODO(shoyer): Remove this when they sanitize the TF API.
if not isinstance(batch_ops, list):
assert isinstance(batch_ops, ops.Tensor)
batch_ops = [batch_ops]
if allow_smaller_final_batch:
batch_size = None
@tc.returns(core.Axes)
@tc.accepts(core.Axes)
def output_axes(axes):
if enqueue_many:
if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
raise ValueError(
'When enqueue_many is True, input tensors must have an axis '
'called "batch" as their first dimension, '
'but axes were %s' % axes)
culled_axes = axes.remove('batch')
return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
else:
return core.Axes([('batch', batch_size)] + list(axes.values()))
output_labeled_tensors = []
for i, tensor in enumerate(batch_ops):
axes = output_axes(labeled_tensors[i].axes)
output_labeled_tensors.append(core.LabeledTensor(tensor, axes))
return output_labeled_tensors
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool,
tc.Optional(string_types))
def batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, int,
tc.Optional(int), bool, tc.Optional(string_types))
def shuffle_batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
min_after_dequeue=0,
seed=None,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor, with shuffling.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
min_after_dequeue: Minimum number of elements in the queue after a dequeue,
used to ensure mixing.
seed: Optional random seed.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.shuffle_batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
min_after_dequeue=min_after_dequeue,
seed=seed,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_shuffle_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types, int),
tc.Optional(int), tc.Optional(string_types))
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
"""Randomly crops a tensor to a given size.
See tf.random_crop.
Args:
labeled_tensor: The input tensor.
shape_map: A dictionary mapping axis names to the size of the random crop
for that dimension.
seed: An optional random seed.
name: An optional op name.
Returns:
A tensor of the same rank as `labeled_tensor`, cropped randomly in the
selected dimensions.
Raises:
ValueError: If the shape map contains an axis name not in the input tensor.
"""
with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
for axis_name in shape_map:
if axis_name not in labeled_tensor.axes:
raise ValueError('Selection axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
shape = []
axes = []
for axis in labeled_tensor.axes.values():
if axis.name in shape_map:
size = shape_map[axis.name]
shape.append(size)
# We lose labels for the axes we crop, leaving just the size.
axes.append((axis.name, size))
else:
shape.append(len(axis))
axes.append(axis)
crop_op = random_ops.random_crop(
labeled_tensor.tensor, shape, seed=seed, name=scope)
return core.LabeledTensor(crop_op, axes)
# TODO(shoyer): Allow the user to select the axis over which to map.
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
tc.Optional(string_types))
def map_fn(fn, labeled_tensor, name=None):
"""Map on the list of tensors unpacked from labeled_tensor.
See tf.map_fn.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type LabeledTensor -> LabeledTensor.
labeled_tensor: The input tensor.
name: Optional op name.
Returns:
A tensor that packs the results of applying fn to the list of tensors
unpacked from labeled_tensor.
"""
with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
unpack_lts = unpack(labeled_tensor)
# TODO(ericmc): Fix this upstream.
if labeled_tensor.dtype == dtypes.string:
# We must construct the full graph here, because functional_ops.map_fn
# doesn't work for string-valued tensors.
# Constructing the full graph may be slow.
map_lts = [fn(t) for t in unpack_lts]
return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
else:
# Figure out what the axis labels should be, but use tf.map_fn to
# construct the graph because it's efficient.
# It may be slow to construct the full graph, so we infer the labels from
# the first element.
# TODO(ericmc): This builds a subgraph which then gets thrown away.
# Find a more elegant solution.
first_map_lt = fn(unpack_lts[0])
final_axes = list(labeled_tensor.axes.values())[:1] + list(
first_map_lt.axes.values())
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor)
def tf_fn(tensor):
original_axes = list(labeled_tensor.axes.values())[1:]
tensor_lt = core.LabeledTensor(tensor, original_axes)
return fn(tensor_lt).tensor
map_op = functional_ops.map_fn(tf_fn, labeled_tensor.tensor)
map_lt = core.LabeledTensor(map_op, final_axes)
return core.identity(map_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def foldl(fn, labeled_tensor, initial_value, name=None):
"""Left fold on the list of tensors unpacked from labeled_tensor.
See tf.foldl.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
Its arguments are (accumulated_value, next_value).
labeled_tensor: The input tensor.
initial_value: The initial value of the accumulator.
name: Optional op name.
Returns:
The accumulated value.
"""
with ops.name_scope(name, 'lt_foldl',
[labeled_tensor, initial_value]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
initial_value = core.convert_to_labeled_tensor(initial_value)
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor, ops.Tensor)
def tf_fn(accumulator, next_element):
accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes)
next_element_lt = core.LabeledTensor(
next_element, list(labeled_tensor.axes.values())[1:])
return fn(accumulator_lt, next_element_lt).tensor
foldl_op = functional_ops.foldl(
tf_fn, labeled_tensor.tensor, initializer=initial_value.tensor)
foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes)
return core.identity(foldl_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def squeeze(labeled_tensor, axis_names=None, name=None):
"""Remove size-1 dimensions.
See tf.squeeze.
Args:
labeled_tensor: The input tensor.
axis_names: The names of the dimensions to remove, or None to remove
all size-1 dimensions.
name: Optional op name.
Returns:
A tensor with the specified dimensions removed.
Raises:
ValueError: If the named axes are not in the tensor, or if they are
not size-1.
"""
with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axis_names is None:
axis_names = [a.name for a in labeled_tensor.axes.values() if len(a) == 1]
for axis_name in axis_names:
if axis_name not in labeled_tensor.axes:
raise ValueError('axis %s is not in tensor axes %s' %
(axis_name, labeled_tensor.axes))
elif len(labeled_tensor.axes[axis_name]) != 1:
raise ValueError(
'cannot squeeze axis with size greater than 1: (%s, %s)' %
(axis_name, labeled_tensor.axes[axis_name]))
squeeze_dimensions = []
axes = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in axis_names:
squeeze_dimensions.append(i)
else:
axes.append(axis)
if squeeze_dimensions:
squeeze_op = array_ops.squeeze(
labeled_tensor.tensor, squeeze_dimensions, name=scope)
else:
squeeze_op = array_ops.identity(labeled_tensor.tensor, name=scope)
return core.LabeledTensor(squeeze_op, axes)
# pylint: disable=invalid-name
ReduceAxis = tc.Union(string_types,
tc.Tuple(string_types, collections.Hashable))
ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))
# pylint: enable=invalid-name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def matmul(a, b, name=None):
"""Matrix multiply two tensors with rank 1 or 2.
If both tensors have rank 2, a matrix-matrix product is performed.
If one tensor has rank 1 and the other has rank 2, then a matrix-vector
product is performed.
If both tensors have rank 1, then a vector dot-product is performed.
(This behavior matches that of `numpy.dot`.)
Both tensors must share exactly one dimension in common, which is the
dimension the operation is summed along. The inputs will be automatically
transposed if necessary as part of the matmul op.
We intend to eventually support `matmul` on higher rank input, and also
  eventually support summing over any number of shared dimensions (via an `axis`
argument), but neither of these features has been implemented yet.
Args:
a: First LabeledTensor.
b: Second LabeledTensor.
name: Optional op name.
Returns:
LabeledTensor with the result of matrix multiplication. Axes are ordered by
    the current axis_order_scope, if set, or in order of appearance on the
inputs.
Raises:
NotImplementedError: If inputs have rank >2 or share multiple axes.
ValueError: If the inputs have rank 0 or do not share any axes.
"""
with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:
a = core.convert_to_labeled_tensor(a)
b = core.convert_to_labeled_tensor(b)
if len(a.axes) > 2 or len(b.axes) > 2:
# We could pass batched inputs to tf.matmul to make this work, but we
# would also need to use tf.tile and/or tf.transpose. These are more
# expensive than doing reshapes, so it's not clear if it's a good idea to
# do this automatically.
raise NotImplementedError(
'matmul currently requires inputs with rank 2 or less, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
if not a.axes or not b.axes:
raise ValueError(
'matmul currently requires inputs with at least rank 1, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
shared_axes = set(a.axes) & set(b.axes)
if len(shared_axes) > 1:
raise NotImplementedError(
'matmul does not yet support summing over multiple shared axes: %r. '
'Use transpose and reshape to create a single shared axis to sum '
'over.' % shared_axes)
if not shared_axes:
      raise ValueError('there must be exactly one axis in common between the '
                       'inputs to matmul: %r, %r' %
(a.axes.keys(), b.axes.keys()))
shared_axis, = shared_axes
if a.axes[shared_axis] != b.axes[shared_axis]:
raise ValueError('axis %r does not match on input arguments: %r vs %r' %
(shared_axis, a.axes[shared_axis].value,
b.axes[shared_axis].value))
result_axes = []
for axes in [a.axes, b.axes]:
for axis in axes.values():
if axis.name != shared_axis:
result_axes.append(axis)
axis_scope_order = core.get_axis_order()
if axis_scope_order is not None:
result_axis_names = [axis.name for axis in result_axes]
new_axis_names = [
name for name in axis_scope_order if name in result_axis_names
]
if new_axis_names != result_axis_names:
# switch a and b
b, a = a, b
# result_axes is a list of length 1 or 2
result_axes = result_axes[::-1]
squeeze_dims = []
if len(a.axes) == 1:
a_tensor = array_ops.reshape(a.tensor, (1, -1))
squeeze_dims.append(0)
transpose_a = False
else:
a_tensor = a.tensor
transpose_a = list(a.axes.keys()).index(shared_axis) == 0
if len(b.axes) == 1:
b_tensor = array_ops.reshape(b.tensor, (-1, 1))
squeeze_dims.append(1)
transpose_b = False
else:
b_tensor = b.tensor
transpose_b = list(b.axes.keys()).index(shared_axis) == 1
result_op = math_ops.matmul(
a_tensor, b_tensor, transpose_a=transpose_a, transpose_b=transpose_b)
if squeeze_dims:
result_op = array_ops.squeeze(result_op, squeeze_dims)
result_op = array_ops.identity(result_op, name=scope)
return core.LabeledTensor(result_op, result_axes)
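# Illustrative usage sketch (not part of the original module): matmul contracts
# over the single axis name shared by its inputs. Assumes `weights` has axes
# ('out', 'in') and `vec` has the single axis 'in'; the names are hypothetical.
def _matmul_usage_sketch(weights, vec):
  # A matrix-vector product, as with numpy.dot: the result keeps only the
  # non-shared axis, here 'out'.
  return matmul(weights, vec)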
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_reduce_op(op_name, reduce_fn):
"""Define a reduction op for labeled tensors.
Args:
op_name: string name of the TensorFlow op.
reduce_fn: function to call to evaluate the op on a tf.Tensor.
Returns:
Function defining the given reduction op that acts on a LabeledTensor.
"""
default_name = 'lt_%s' % op_name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types))
def op(labeled_tensor, axes=None, name=None):
"""Computes the given reduction across the given axes of a LabeledTensor.
See `tf.{op_name}` for full details.
Args:
labeled_tensor: The input tensor.
axes: A set of axes or None.
If None, all axes will be reduced.
        Each axis may be a string, in which case that dimension is removed,
        or a pair of (name, None) or (name, label), in which case that
        dimension is kept.
name: Optional op name.
Returns:
The reduced LabeledTensor.
Raises:
ValueError: if any of the axes to reduce over are not found on
`labeled_tensor`.
"""
with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axes is None:
axes = labeled_tensor.axes.keys()
if isinstance(axes, (string_types, tuple)):
axes = [axes]
reduction_axes = {}
axes_to_squeeze = []
for a in axes:
if isinstance(a, string_types):
# We squeeze out this axis.
reduction_axes[a] = a
axes_to_squeeze.append(a)
else:
# We keep this axis, with the user-provided labels.
(axis_name, label) = a
if label is not None:
# The input was a single label, so make it a list so it can be
# turned into an Axis.
label = [label]
reduction_axes[axis_name] = (axis_name, label)
for axis_name in reduction_axes:
if axis_name not in labeled_tensor.axes:
raise ValueError('Axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
intermediate_axes = []
reduction_dimensions = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in reduction_axes:
intermediate_axes.append(reduction_axes[axis.name])
reduction_dimensions.append(i)
else:
intermediate_axes.append(axis)
reduce_op = reduce_fn(
labeled_tensor.tensor, reduction_dimensions, keep_dims=True)
reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)
return squeeze(reduce_lt, axes_to_squeeze, name=scope)
op.__doc__ = op.__doc__.format(op_name=op_name)
op.__name__ = op_name
return op
reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all)
reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any)
reduce_logsumexp = define_reduce_op('reduce_logsumexp',
math_ops.reduce_logsumexp)
reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max)
reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean)
reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min)
reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod)
reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum)
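# Illustrative usage sketch (not part of the original module): reductions either
# remove an axis (string form) or keep it with a single user-supplied label
# (tuple form). Assumes `lt` has axes ('batch', 'feature'); names are made up.
def _reduce_usage_sketch(lt):
  per_feature = reduce_mean(lt, 'batch')       # the 'batch' axis is squeezed out
  totals = reduce_sum(lt, ('batch', 'total'))  # 'batch' kept, labeled ['total']
  return per_feature, totals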
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Union(int, ops.Tensor)),
tc.Optional(string_types))
def tile(labeled_tensor, multiples, name=None):
"""Constructs a tensor by tiling a given tensor.
Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
tensors would no longer be unique.)
  See tf.tile.
Args:
labeled_tensor: The input tensor.
multiples: A mapping where the keys are axis names and the values are the
      integer number of times to tile along that axis. Only axes with a
      multiple different from 1 need be included.
name: Optional op name.
Returns:
A tensor with the indicated axes tiled.
Raises:
ValueError: If the tiled axes are not axes in the input tensor, or if any
axes in multiples have tick labels.
"""
with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('tile axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(multiples.keys(), labeled_tensor.axes))
labeled_axes = [
name for name in multiples
if labeled_tensor.axes[name].labels is not None
]
if labeled_axes:
raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes)
multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes]
tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope)
new_axes = [
axis.name if axis.labels is None else axis
for axis in labeled_tensor.axes.values()
]
return core.LabeledTensor(tile_op, new_axes)
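# Illustrative usage sketch (not part of the original module): tiling an axis
# that has no tick labels. The axis name 'x' and the multiple are hypothetical.
def _tile_usage_sketch(lt):
  # 'x' becomes three times as long; axes not named in the mapping are unchanged.
  return tile(lt, {'x': 3})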
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)),
string_types, tc.Optional(string_types))
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
"""Pads a tensor.
See tf.pad.
Args:
labeled_tensor: The input tensor.
paddings: A mapping where the keys are axis names and the values are
tuples where the first element is the padding to insert at the beginning
of the axis and the second is the padding to insert at the end of the
axis.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
name: Optional op name.
Returns:
A tensor with the indicated axes padded, optionally with those axes extended
with the provided labels.
Raises:
ValueError: If the padded axes are not axes in the input tensor.
"""
with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('pad axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(paddings.keys(), labeled_tensor.axes))
new_axes = []
padding_pairs = []
for name, axis in labeled_tensor.axes.items():
if name in paddings:
padding_before, padding_after = paddings[name]
axis_before = core.Axis(name, padding_before)
axis_after = core.Axis(name, padding_after)
new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
padding_pairs.append((len(axis_before), len(axis_after)))
else:
new_axes.append(axis)
padding_pairs.append((0, 0))
pad_op = array_ops.pad(labeled_tensor.tensor,
padding_pairs,
mode,
name=scope)
return core.LabeledTensor(pad_op, new_axes)
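# Illustrative usage sketch (not part of the original module): padding a single
# axis with one element before and two after. The axis name 'x' and the
# padding amounts are hypothetical.
def _pad_usage_sketch(lt):
  return pad(lt, {'x': (1, 2)}, mode='CONSTANT')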
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Union(np.ndarray, list, tuple, core.Scalar),
tc.Optional(dtypes.DType),
tc.Optional(
tc.Union(core.Axes, tc.Collection(
tc.Union(string_types, core.AxisLike)))), tc.Optional(string_types))
def constant(value, dtype=None, axes=None, name=None):
"""Creates a constant tensor.
If `axes` includes any strings, shape is inferred from `value`. Otherwise,
the sizes of the given `axes` are used to set `shape` for `tf.constant`.
See tf.constant for more details.
Args:
value: The input tensor.
dtype: The type of the returned tensor.
axes: Optional Axes, list of strings or list of objects coercible to Axis
objects. By default, axes are assumed to be an empty list (i.e., `value`
is treated as a scalar).
name: Optional op name.
Returns:
    The constant LabeledTensor.
"""
with ops.name_scope(name, 'lt_constant', [value]) as scope:
if axes is None:
axes = []
if isinstance(axes, core.Axes):
axes = axes.values()
if any(isinstance(ax, string_types) for ax in axes):
# need to infer shape
shape = None
else:
# axes already indicate shape
axes = [core.as_axis(a) for a in axes]
shape = [a.size for a in axes]
op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
return core.LabeledTensor(op, axes)
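# Illustrative usage sketch (not part of the original module): constants with no
# axes and with a labeled axis. The axis name and labels are hypothetical.
def _constant_usage_sketch():
  scalar = constant(1.0)  # rank-0 LabeledTensor with no axes
  vector = constant([1, 2, 3], axes=[('x', ['a', 'b', 'c'])])  # labeled axis of size 3
  return scalar, vector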
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def zeros_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to zero.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to zero.
"""
with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.zeros_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def ones_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to one.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to one.
"""
with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.ones_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def cast(labeled_tensor, dtype=None, name=None):
"""Casts a labeled tensor to a new type.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
A labeled tensor with the new dtype.
"""
with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = math_ops.cast(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types))
def verify_tensor_all_finite(labeled_tensor, message, name=None):
"""Asserts a tensor doesn't contain NaNs or Infs.
See tf.verify_tensor_all_finite.
Args:
labeled_tensor: The input tensor.
message: Message to log on failure.
name: Optional op name.
Returns:
The input tensor.
"""
with ops.name_scope(name, 'lt_verify_tensor_all_finite',
[labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = numerics.verify_tensor_all_finite(
labeled_tensor.tensor, msg=message, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def boolean_mask(labeled_tensor, mask, name=None):
"""Apply a boolean mask to a labeled tensor.
Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
The mask is applied to the first axis of `labeled_tensor`. Labels on the first
axis are removed, because True indices in `mask` may not be known dynamically.
Args:
labeled_tensor: The input tensor.
    mask: A boolean LabeledTensor with a single axis matching the first axis
      of `labeled_tensor`.
name: Optional op name.
Returns:
The masked labeled tensor.
Raises:
    ValueError: if the axis of `mask` does not equal the first axis of
      `labeled_tensor`.
"""
with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
mask = core.convert_to_labeled_tensor(mask)
if len(mask.axes) > 1:
raise NotImplementedError(
"LabeledTensor's boolean_mask currently only supports 1D masks")
mask_axis = list(mask.axes.values())[0]
lt_axis = list(labeled_tensor.axes.values())[0]
if mask_axis != lt_axis:
raise ValueError('the first axis of the labeled tensor and the mask '
'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
# TODO(shoyer): attempt to infer labels for the masked values, by calling
# tf.contrib.util.constant_value on the mask?
axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def where(condition, x, y, name=None):
"""Return elements from x or y depending on condition.
See `tf.where` for more details. This function currently only implements the
three argument version of where.
Args:
condition: LabeledTensor of type `bool`.
x: LabeledTensor for values where condition is true.
y: LabeledTensor for values where condition is false.
name: Optional op name.
Returns:
The labeled tensor with values according to condition.
Raises:
    ValueError: if `condition`, `x`, and `y` do not all have identical axes.
"""
with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope:
condition = core.convert_to_labeled_tensor(condition)
x = core.convert_to_labeled_tensor(x)
y = core.convert_to_labeled_tensor(y)
if not condition.axes == x.axes == y.axes:
raise ValueError('all inputs to `where` must have equal axes')
op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope)
return core.LabeledTensor(op, x.axes)
| apache-2.0 |
detrout/debian-statsmodels | statsmodels/sandbox/survival2.py | 35 | 17924 | #Kaplan-Meier Estimator
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
from scipy import stats
from statsmodels.iolib.table import SimpleTable
class KaplanMeier(object):
"""
KaplanMeier(...)
KaplanMeier(data, endog, exog=None, censoring=None)
Create an object of class KaplanMeier for estimating
Kaplan-Meier survival curves.
Parameters
----------
data: array_like
An array, with observations in each row, and
variables in the columns
endog: index (starting at zero) of the column
containing the endogenous variable (time)
exog: index of the column containing the exogenous
    variable (must be categorical). If exog = None, this
is equivalent to a single survival curve
censoring: index of the column containing an indicator
of whether an observation is an event, or a censored
observation, with 0 for censored, and 1 for an event
Attributes
-----------
censorings: List of censorings associated with each unique
time, at each value of exog
events: List of the number of events at each unique time
for each value of exog
    results: List of arrays containing estimates of the value of the
    survival function and its standard error
at each unique time, for each value of exog
ts: List of unique times for each value of exog
Methods
-------
    fit: Calculate the Kaplan-Meier estimates of the survival
function and its standard error at each time, for each
value of exog
    plot: Plot the survival curves using matplotlib.pyplot
summary: Display the results of fit in a table. Gives results
for all (including censored) times
test_diff: Test for difference between survival curves
Examples
--------
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from statsmodels.sandbox.survival2 import KaplanMeier
>>> dta = sm.datasets.strikes.load()
>>> dta = dta.values()[-1]
>>> dta[range(5),:]
array([[ 7.00000000e+00, 1.13800000e-02],
[ 9.00000000e+00, 1.13800000e-02],
[ 1.30000000e+01, 1.13800000e-02],
[ 1.40000000e+01, 1.13800000e-02],
[ 2.60000000e+01, 1.13800000e-02]])
>>> km = KaplanMeier(dta,0)
>>> km.fit()
>>> km.plot()
Doing
>>> km.summary()
will display a table of the estimated survival and standard errors
for each time. The first few lines are
Kaplan-Meier Curve
=====================================
Time Survival Std. Err
-------------------------------------
1.0 0.983870967742 0.0159984306572
2.0 0.91935483871 0.0345807888235
3.0 0.854838709677 0.0447374942184
4.0 0.838709677419 0.0467104592871
5.0 0.822580645161 0.0485169952543
Doing
>>> plt.show()
will plot the survival curve
    Multiple survival curves:
>>> km2 = KaplanMeier(dta,0,exog=1)
>>> km2.fit()
km2 will estimate a survival curve for each value of industrial
production, the column of dta with index one (1).
With censoring:
>>> censoring = np.ones_like(dta[:,0])
>>> censoring[dta[:,0] > 80] = 0
>>> dta = np.c_[dta,censoring]
>>> dta[range(5),:]
array([[ 7.00000000e+00, 1.13800000e-02, 1.00000000e+00],
[ 9.00000000e+00, 1.13800000e-02, 1.00000000e+00],
[ 1.30000000e+01, 1.13800000e-02, 1.00000000e+00],
[ 1.40000000e+01, 1.13800000e-02, 1.00000000e+00],
[ 2.60000000e+01, 1.13800000e-02, 1.00000000e+00]])
>>> km3 = KaplanMeier(dta,0,exog=1,censoring=2)
>>> km3.fit()
Test for difference of survival curves
>>> log_rank = km3.test_diff([0.0645,-0.03957])
The zeroth element of log_rank is the chi-square test statistic
for the difference between the survival curves for exog = 0.0645
and exog = -0.03957, the index one element is the degrees of freedom for
the test, and the index two element is the p-value for the test
    Groups with string names
>>> groups = np.ones_like(dta[:,1])
>>> groups = groups.astype('S4')
>>> groups[dta[:,1] > 0] = 'high'
>>> groups[dta[:,1] <= 0] = 'low'
>>> dta = dta.astype('S4')
>>> dta[:,1] = groups
>>> dta[range(5),:]
array([['7.0', 'high', '1.0'],
['9.0', 'high', '1.0'],
['13.0', 'high', '1.0'],
['14.0', 'high', '1.0'],
['26.0', 'high', '1.0']],
dtype='|S4')
>>> km4 = KaplanMeier(dta,0,exog=1,censoring=2)
>>> km4.fit()
"""
def __init__(self, data, endog, exog=None, censoring=None):
self.exog = exog
self.censoring = censoring
cols = [endog]
self.endog = 0
if exog != None:
cols.append(exog)
self.exog = 1
if censoring != None:
cols.append(censoring)
if exog != None:
self.censoring = 2
else:
self.censoring = 1
data = data[:,cols]
if data.dtype == float or data.dtype == int:
self.data = data[~np.isnan(data).any(1)]
else:
t = (data[:,self.endog]).astype(float)
if exog != None:
evec = data[:,self.exog]
evec = evec[~np.isnan(t)]
if censoring != None:
cvec = (data[:,self.censoring]).astype(float)
cvec = cvec[~np.isnan(t)]
t = t[~np.isnan(t)]
if censoring != None:
t = t[~np.isnan(cvec)]
if exog != None:
evec = evec[~np.isnan(cvec)]
cvec = cvec[~np.isnan(cvec)]
cols = [t]
if exog != None:
cols.append(evec)
if censoring != None:
cols.append(cvec)
data = (np.array(cols)).transpose()
self.data = data
def fit(self):
"""
Calculate the Kaplan-Meier estimator of the survival function
"""
self.results = []
self.ts = []
self.censorings = []
self.event = []
if self.exog == None:
self.fitting_proc(self.data)
else:
groups = np.unique(self.data[:,self.exog])
self.groups = groups
for g in groups:
group = self.data[self.data[:,self.exog] == g]
self.fitting_proc(group)
def plot(self):
"""
Plot the estimated survival curves. After using this method
do
plt.show()
to display the plot
"""
plt.figure()
if self.exog == None:
self.plotting_proc(0)
else:
for g in range(len(self.groups)):
self.plotting_proc(g)
plt.ylim(ymax=1.05)
plt.ylabel('Survival')
plt.xlabel('Time')
def summary(self):
"""
Print a set of tables containing the estimates of the survival
function, and its standard errors
"""
if self.exog == None:
self.summary_proc(0)
else:
for g in range(len(self.groups)):
self.summary_proc(g)
def fitting_proc(self, group):
"""
For internal use
"""
t = ((group[:,self.endog]).astype(float)).astype(int)
if self.censoring == None:
events = np.bincount(t)
t = np.unique(t)
events = events[:,list(t)]
events = events.astype(float)
eventsSum = np.cumsum(events)
eventsSum = np.r_[0,eventsSum]
n = len(group) - eventsSum[:-1]
else:
censoring = ((group[:,self.censoring]).astype(float)).astype(int)
reverseCensoring = -1*(censoring - 1)
events = np.bincount(t,censoring)
censored = np.bincount(t,reverseCensoring)
t = np.unique(t)
censored = censored[:,list(t)]
censored = censored.astype(float)
censoredSum = np.cumsum(censored)
censoredSum = np.r_[0,censoredSum]
events = events[:,list(t)]
events = events.astype(float)
eventsSum = np.cumsum(events)
eventsSum = np.r_[0,eventsSum]
n = len(group) - eventsSum[:-1] - censoredSum[:-1]
(self.censorings).append(censored)
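        # Product-limit (Kaplan-Meier) estimate:
        #   S(t) = prod over t_i <= t of (1 - d_i / n_i),
        # where d_i is the number of events and n_i the number at risk at t_i.
        # The variance below is Greenwood's formula:
        #   Var[S(t)] = S(t)**2 * sum over t_i <= t of d_i / (n_i * (n_i - d_i)).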
survival = np.cumprod(1-events/n)
var = ((survival*survival) *
np.cumsum(events/(n*(n-events))))
se = np.sqrt(var)
(self.results).append(np.array([survival,se]))
(self.ts).append(t)
(self.event).append(events)
def plotting_proc(self, g):
"""
For internal use
"""
survival = self.results[g][0]
t = self.ts[g]
e = (self.event)[g]
if self.censoring != None:
c = self.censorings[g]
csurvival = survival[c != 0]
ct = t[c != 0]
if len(ct) != 0:
plt.vlines(ct,csurvival+0.02,csurvival-0.02)
x = np.repeat(t[e != 0], 2)
y = np.repeat(survival[e != 0], 2)
if self.ts[g][-1] in t[e != 0]:
x = np.r_[0,x]
y = np.r_[1,1,y[:-1]]
else:
x = np.r_[0,x,self.ts[g][-1]]
y = np.r_[1,1,y]
plt.plot(x,y)
def summary_proc(self, g):
"""
For internal use
"""
if self.exog != None:
myTitle = ('exog = ' + str(self.groups[g]) + '\n')
else:
myTitle = "Kaplan-Meier Curve"
table = np.transpose(self.results[g])
table = np.c_[np.transpose(self.ts[g]),table]
table = SimpleTable(table, headers=['Time','Survival','Std. Err'],
title = myTitle)
print(table)
def test_diff(self, groups, rho=None, weight=None):
"""
test_diff(groups, rho=0)
Test for difference between survival curves
Parameters
----------
groups: A list of the values for exog to test for difference.
tests the null hypothesis that the survival curves for all
values of exog in groups are equal
rho: compute the test statistic with weight S(t)^rho, where
S(t) is the pooled estimate for the Kaplan-Meier survival function.
        If rho = 0, this is the logrank test, and if rho = 1, this is the
Peto and Peto modification to the Gehan-Wilcoxon test.
        weight: User specified function that accepts as its sole argument
an array of times, and returns an array of weights for each time
to be used in the test
Returns
-------
An array whose zeroth element is the chi-square test statistic for
the global null hypothesis, that all survival curves are equal,
the index one element is degrees of freedom for the test, and the
index two element is the p-value for the test.
Examples
--------
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from statsmodels.sandbox.survival2 import KaplanMeier
>>> dta = sm.datasets.strikes.load()
>>> dta = dta.values()[-1]
>>> censoring = np.ones_like(dta[:,0])
>>> censoring[dta[:,0] > 80] = 0
>>> dta = np.c_[dta,censoring]
>>> km = KaplanMeier(dta,0,exog=1,censoring=2)
>>> km.fit()
Test for difference of survival curves
        >>> log_rank = km.test_diff([0.0645,-0.03957])
The zeroth element of log_rank is the chi-square test statistic
for the difference between the survival curves using the log rank test
for exog = 0.0645 and exog = -0.03957, the index one element
is the degrees of freedom for the test, and the index two element
is the p-value for the test
>>> wilcoxon = km.test_diff([0.0645,-0.03957], rho=1)
wilcoxon is the equivalent information as log_rank, but for the
Peto and Peto modification to the Gehan-Wilcoxon test.
User specified weight functions
        >>> log_rank = km.test_diff([0.0645,-0.03957], weight=np.ones_like)
This is equivalent to the log rank test
More than two groups
>>> log_rank = km.test_diff([0.0645,-0.03957,0.01138])
The test can be performed with arbitrarily many groups, so long as
they are all in the column exog
"""
groups = np.asarray(groups)
if self.exog == None:
raise ValueError("Need an exogenous variable for logrank test")
elif (np.in1d(groups,self.groups)).all():
data = self.data[np.in1d(self.data[:,self.exog],groups)]
t = ((data[:,self.endog]).astype(float)).astype(int)
tind = np.unique(t)
NK = []
N = []
D = []
Z = []
if rho != None and weight != None:
raise ValueError("Must use either rho or weights, not both")
elif rho != None:
s = KaplanMeier(data,self.endog,censoring=self.censoring)
s.fit()
s = (s.results[0][0]) ** (rho)
s = np.r_[1,s[:-1]]
elif weight != None:
s = weight(tind)
else:
s = np.ones_like(tind)
if self.censoring == None:
for g in groups:
dk = np.bincount((t[data[:,self.exog] == g]))
d = np.bincount(t)
if np.max(tind) != len(dk):
dif = np.max(tind) - len(dk) + 1
dk = np.r_[dk,[0]*dif]
dk = dk[:,list(tind)]
d = d[:,list(tind)]
dk = dk.astype(float)
d = d.astype(float)
dkSum = np.cumsum(dk)
dSum = np.cumsum(d)
dkSum = np.r_[0,dkSum]
dSum = np.r_[0,dSum]
nk = len(data[data[:,self.exog] == g]) - dkSum[:-1]
n = len(data) - dSum[:-1]
d = d[n>1]
dk = dk[n>1]
nk = nk[n>1]
n = n[n>1]
s = s[n>1]
ek = (nk * d)/(n)
Z.append(np.sum(s * (dk - ek)))
NK.append(nk)
N.append(n)
D.append(d)
else:
for g in groups:
censoring = ((data[:,self.censoring]).astype(float)).astype(int)
reverseCensoring = -1*(censoring - 1)
censored = np.bincount(t,reverseCensoring)
ck = np.bincount((t[data[:,self.exog] == g]),
reverseCensoring[data[:,self.exog] == g])
dk = np.bincount((t[data[:,self.exog] == g]),
censoring[data[:,self.exog] == g])
d = np.bincount(t,censoring)
if np.max(tind) != len(dk):
dif = np.max(tind) - len(dk) + 1
dk = np.r_[dk,[0]*dif]
ck = np.r_[ck,[0]*dif]
dk = dk[:,list(tind)]
ck = ck[:,list(tind)]
d = d[:,list(tind)]
dk = dk.astype(float)
d = d.astype(float)
ck = ck.astype(float)
dkSum = np.cumsum(dk)
dSum = np.cumsum(d)
ck = np.cumsum(ck)
ck = np.r_[0,ck]
dkSum = np.r_[0,dkSum]
dSum = np.r_[0,dSum]
censored = censored[:,list(tind)]
censored = censored.astype(float)
censoredSum = np.cumsum(censored)
censoredSum = np.r_[0,censoredSum]
nk = (len(data[data[:,self.exog] == g]) - dkSum[:-1]
- ck[:-1])
n = len(data) - dSum[:-1] - censoredSum[:-1]
d = d[n>1]
dk = dk[n>1]
nk = nk[n>1]
n = n[n>1]
s = s[n>1]
ek = (nk * d)/(n)
Z.append(np.sum(s * (dk - ek)))
NK.append(nk)
N.append(n)
D.append(d)
Z = np.array(Z)
N = np.array(N)
D = np.array(D)
NK = np.array(NK)
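            # Weighted log-rank covariance of the score vector Z: for groups j, k,
            #   sigma_jk = sum_i s_i**2 * (n_ji / n_i) * (delta_jk - n_ki / n_i)
            #              * d_i * (n_i - d_i) / (n_i - 1),
            # and the statistic Z' pinv(sigma) Z is referred to a chi-square
            # distribution with len(groups) - 1 degrees of freedom.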
sigma = -1 * np.dot((NK/N) * ((N - D)/(N - 1)) * D
* np.array([(s ** 2)]*len(D))
,np.transpose(NK/N))
np.fill_diagonal(sigma, np.diagonal(np.dot((NK/N)
* ((N - D)/(N - 1)) * D
* np.array([(s ** 2)]*len(D))
,np.transpose(1 - (NK/N)))))
chisq = np.dot(np.transpose(Z),np.dot(la.pinv(sigma), Z))
df = len(groups) - 1
return np.array([chisq, df, stats.chi2.sf(chisq,df)])
else:
raise ValueError("groups must be in column exog")
| bsd-3-clause |
astropy/astropy | astropy/visualization/mpl_normalize.py | 5 | 15298 | """
Normalization class for Matplotlib that can be used to produce
colorbars.
"""
import inspect
import warnings
import numpy as np
from numpy import ma
from .interval import (PercentileInterval, AsymmetricPercentileInterval,
ManualInterval, MinMaxInterval, BaseInterval)
from .stretch import (LinearStretch, SqrtStretch, PowerStretch, LogStretch,
AsinhStretch, BaseStretch)
from ..utils.exceptions import AstropyDeprecationWarning
try:
import matplotlib # pylint: disable=W0611
from matplotlib.colors import Normalize
from matplotlib import pyplot as plt
except ImportError:
class Normalize:
def __init__(self, *args, **kwargs):
raise ImportError('matplotlib is required in order to use this '
'class.')
__all__ = ['ImageNormalize', 'simple_norm', 'imshow_norm']
__doctest_requires__ = {'*': ['matplotlib']}
class ImageNormalize(Normalize):
"""
Normalization class to be used with Matplotlib.
Parameters
----------
data : ndarray, optional
The image array. This input is used only if ``interval`` is
also input. ``data`` and ``interval`` are used to compute the
vmin and/or vmax values only if ``vmin`` or ``vmax`` are not
input.
interval : `~astropy.visualization.BaseInterval` subclass instance, optional
The interval object to apply to the input ``data`` to determine
the ``vmin`` and ``vmax`` values. This input is used only if
``data`` is also input. ``data`` and ``interval`` are used to
compute the vmin and/or vmax values only if ``vmin`` or ``vmax``
are not input.
vmin, vmax : float, optional
The minimum and maximum levels to show for the data. The
``vmin`` and ``vmax`` inputs override any calculated values from
the ``interval`` and ``data`` inputs.
stretch : `~astropy.visualization.BaseStretch` subclass instance
The stretch object to apply to the data. The default is
`~astropy.visualization.LinearStretch`.
clip : bool, optional
If `True`, data values outside the [0:1] range are clipped to
the [0:1] range.
invalid : None or float, optional
Value to assign NaN values generated by this class. NaNs in the
input ``data`` array are not changed. For matplotlib
normalization, the ``invalid`` value should map to the
matplotlib colormap "under" value (i.e., any finite value < 0).
If `None`, then NaN values are not replaced. This keyword has
no effect if ``clip=True``.
"""
def __init__(self, data=None, interval=None, vmin=None, vmax=None,
stretch=LinearStretch(), clip=False, invalid=-1.0):
# this super call checks for matplotlib
super().__init__(vmin=vmin, vmax=vmax, clip=clip)
self.vmin = vmin
self.vmax = vmax
if stretch is None:
raise ValueError('stretch must be input')
if not isinstance(stretch, BaseStretch):
raise TypeError('stretch must be an instance of a BaseStretch '
'subclass')
self.stretch = stretch
if interval is not None and not isinstance(interval, BaseInterval):
raise TypeError('interval must be an instance of a BaseInterval '
'subclass')
self.interval = interval
self.inverse_stretch = stretch.inverse
self.clip = clip
self.invalid = invalid
# Define vmin and vmax if not None and data was input
if data is not None:
self._set_limits(data)
def _set_limits(self, data):
if self.vmin is not None and self.vmax is not None:
return
# Define vmin and vmax from the interval class if not None
if self.interval is None:
if self.vmin is None:
self.vmin = np.min(data[np.isfinite(data)])
if self.vmax is None:
self.vmax = np.max(data[np.isfinite(data)])
else:
_vmin, _vmax = self.interval.get_limits(data)
if self.vmin is None:
self.vmin = _vmin
if self.vmax is None:
self.vmax = _vmax
def __call__(self, values, clip=None, invalid=None):
"""
Transform values using this normalization.
Parameters
----------
values : array-like
The input values.
clip : bool, optional
If `True`, values outside the [0:1] range are clipped to the
[0:1] range. If `None` then the ``clip`` value from the
`ImageNormalize` instance is used (the default of which is
`False`).
invalid : None or float, optional
Value to assign NaN values generated by this class. NaNs in
the input ``data`` array are not changed. For matplotlib
normalization, the ``invalid`` value should map to the
matplotlib colormap "under" value (i.e., any finite value <
0). If `None`, then the `ImageNormalize` instance value is
used. This keyword has no effect if ``clip=True``.
"""
if clip is None:
clip = self.clip
if invalid is None:
invalid = self.invalid
if isinstance(values, ma.MaskedArray):
if clip:
mask = False
else:
mask = values.mask
values = values.filled(self.vmax)
else:
mask = False
# Make sure scalars get broadcast to 1-d
if np.isscalar(values):
values = np.array([values], dtype=float)
else:
# copy because of in-place operations after
values = np.array(values, copy=True, dtype=float)
# Define vmin and vmax if not None
self._set_limits(values)
# Normalize based on vmin and vmax
np.subtract(values, self.vmin, out=values)
np.true_divide(values, self.vmax - self.vmin, out=values)
# Clip to the 0 to 1 range
if clip:
values = np.clip(values, 0., 1., out=values)
# Stretch values
if self.stretch._supports_invalid_kw:
values = self.stretch(values, out=values, clip=False,
invalid=invalid)
else:
values = self.stretch(values, out=values, clip=False)
# Convert to masked array for matplotlib
return ma.array(values, mask=mask)
def inverse(self, values, invalid=None):
# Find unstretched values in range 0 to 1
if self.inverse_stretch._supports_invalid_kw:
values_norm = self.inverse_stretch(values, clip=False,
invalid=invalid)
else:
values_norm = self.inverse_stretch(values, clip=False)
# Scale to original range
return values_norm * (self.vmax - self.vmin) + self.vmin
def simple_norm(data, stretch='linear', power=1.0, asinh_a=0.1, min_cut=None,
max_cut=None, min_percent=None, max_percent=None,
percent=None, clip=False, log_a=1000, invalid=-1.0):
"""
Return a Normalization class that can be used for displaying images
with Matplotlib.
This function enables only a subset of image stretching functions
available in `~astropy.visualization.mpl_normalize.ImageNormalize`.
This function is used by the
``astropy.visualization.scripts.fits2bitmap`` script.
Parameters
----------
data : ndarray
The image array.
    stretch : {'linear', 'sqrt', 'power', 'log', 'asinh'}, optional
The stretch function to apply to the image. The default is
'linear'.
power : float, optional
The power index for ``stretch='power'``. The default is 1.0.
asinh_a : float, optional
For ``stretch='asinh'``, the value where the asinh curve
transitions from linear to logarithmic behavior, expressed as a
fraction of the normalized image. Must be in the range between
0 and 1. The default is 0.1.
min_cut : float, optional
        The pixel value of the minimum cut level. Data values less than
        ``min_cut`` will be set to ``min_cut`` before stretching the image.
The default is the image minimum. ``min_cut`` overrides
``min_percent``.
max_cut : float, optional
        The pixel value of the maximum cut level. Data values greater
        than ``max_cut`` will be set to ``max_cut`` before stretching the
        image. The default is the image maximum. ``max_cut`` overrides
``max_percent``.
min_percent : float, optional
The percentile value used to determine the pixel value of
minimum cut level. The default is 0.0. ``min_percent``
overrides ``percent``.
max_percent : float, optional
The percentile value used to determine the pixel value of
maximum cut level. The default is 100.0. ``max_percent``
overrides ``percent``.
percent : float, optional
The percentage of the image values used to determine the pixel
values of the minimum and maximum cut levels. The lower cut
        level will be set at the ``(100 - percent) / 2`` percentile, while
the upper cut level will be set at the ``(100 + percent) / 2``
percentile. The default is 100.0. ``percent`` is ignored if
either ``min_percent`` or ``max_percent`` is input.
clip : bool, optional
If `True`, data values outside the [0:1] range are clipped to
the [0:1] range.
log_a : float, optional
The log index for ``stretch='log'``. The default is 1000.
invalid : None or float, optional
Value to assign NaN values generated by the normalization. NaNs
in the input ``data`` array are not changed. For matplotlib
normalization, the ``invalid`` value should map to the
matplotlib colormap "under" value (i.e., any finite value < 0).
If `None`, then NaN values are not replaced. This keyword has
no effect if ``clip=True``.
Returns
-------
result : `ImageNormalize` instance
An `ImageNormalize` instance that can be used for displaying
images with Matplotlib.
"""
if percent is not None:
interval = PercentileInterval(percent)
elif min_percent is not None or max_percent is not None:
interval = AsymmetricPercentileInterval(min_percent or 0.,
max_percent or 100.)
elif min_cut is not None or max_cut is not None:
interval = ManualInterval(min_cut, max_cut)
else:
interval = MinMaxInterval()
if stretch == 'linear':
stretch = LinearStretch()
elif stretch == 'sqrt':
stretch = SqrtStretch()
elif stretch == 'power':
stretch = PowerStretch(power)
elif stretch == 'log':
stretch = LogStretch(log_a)
elif stretch == 'asinh':
stretch = AsinhStretch(asinh_a)
else:
raise ValueError(f'Unknown stretch: {stretch}.')
vmin, vmax = interval.get_limits(data)
return ImageNormalize(vmin=vmin, vmax=vmax, stretch=stretch, clip=clip,
invalid=invalid)
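# A minimal usage sketch (not part of the original module; the array, stretch and
# percentile below are illustrative assumptions): simple_norm returns an
# ImageNormalize object that can be passed straight to matplotlib's imshow.
def _example_simple_norm_usage():
    import numpy as np
    import matplotlib.pyplot as plt
    data = np.random.random((64, 64))  # hypothetical image data
    norm = simple_norm(data, stretch='sqrt', percent=99.0)
    im = plt.imshow(data, origin='lower', norm=norm, cmap='gray')
    plt.colorbar(im)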
# used in imshow_norm
_norm_sig = inspect.signature(ImageNormalize)
def imshow_norm(data, ax=None, imshow_only_kwargs={}, **kwargs):
""" A convenience function to call matplotlib's `matplotlib.pyplot.imshow`
function, using an `ImageNormalize` object as the normalization.
Parameters
----------
data : 2D or 3D array-like
The data to show. Can be whatever `~matplotlib.pyplot.imshow` and
`ImageNormalize` both accept. See `~matplotlib.pyplot.imshow`.
ax : None or `~matplotlib.axes.Axes`, optional
If None, use pyplot's imshow. Otherwise, calls ``imshow`` method of
the supplied axes.
imshow_only_kwargs : dict, optional
        Deprecated since Astropy v4.1. Note that setting both ``norm``
and ``vmin/vmax`` is deprecated in ``matplotlib >= 3.3``.
Arguments to be passed directly to `~matplotlib.pyplot.imshow` without
first trying `ImageNormalize`. This is only for keywords that have the
same name in both `ImageNormalize` and `~matplotlib.pyplot.imshow` - if
you want to set the `~matplotlib.pyplot.imshow` keywords only, supply
them in this dictionary.
kwargs : dict, optional
All other keyword arguments are parsed first by the
`ImageNormalize` initializer, then to
`~matplotlib.pyplot.imshow`.
Returns
-------
result : tuple
A tuple containing the `~matplotlib.image.AxesImage` generated
by `~matplotlib.pyplot.imshow` as well as the `ImageNormalize`
instance.
Notes
-----
The ``norm`` matplotlib keyword is not supported.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import (imshow_norm, MinMaxInterval,
SqrtStretch)
# Generate and display a test image
image = np.arange(65536).reshape((256, 256))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
im, norm = imshow_norm(image, ax, origin='lower',
interval=MinMaxInterval(),
stretch=SqrtStretch())
fig.colorbar(im)
"""
if imshow_only_kwargs:
warnings.warn('imshow_only_kwargs is deprecated since v4.1 and will '
'be removed in a future version.',
AstropyDeprecationWarning)
if 'X' in kwargs:
raise ValueError('Cannot give both ``X`` and ``data``')
if 'norm' in kwargs:
raise ValueError('There is no point in using imshow_norm if you give '
'the ``norm`` keyword - use imshow directly if you '
'want that.')
imshow_kwargs = dict(kwargs)
norm_kwargs = {'data': data}
for pname in _norm_sig.parameters:
if pname in kwargs:
norm_kwargs[pname] = imshow_kwargs.pop(pname)
for k, v in imshow_only_kwargs.items():
if k not in _norm_sig.parameters:
# the below is not strictly "has to be true", but is here so that
# users don't start using both imshow_only_kwargs *and* keyword
# arguments to this function, as that makes for more confusing
# user code
raise ValueError('You provided a keyword to imshow_only_kwargs '
'({}) that is not a keyword for ImageNormalize. '
'This is not supported. Instead you should '
'pass the keyword directly into imshow_norm'
.format(k))
imshow_kwargs[k] = v
imshow_kwargs['norm'] = ImageNormalize(**norm_kwargs)
if ax is None:
imshow_result = plt.imshow(data, **imshow_kwargs)
else:
imshow_result = ax.imshow(data, **imshow_kwargs)
return imshow_result, imshow_kwargs['norm']
| bsd-3-clause |
zuotingbing/spark | python/pyspark/testing/sqlutils.py | 9 | 7813 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import shutil
import tempfile
from contextlib import contextmanager
from pyspark.sql import SparkSession
from pyspark.sql.types import ArrayType, DoubleType, UserDefinedType, Row
from pyspark.testing.utils import ReusedPySparkTestCase
from pyspark.util import _exception_message
pandas_requirement_message = None
try:
from pyspark.sql.pandas.utils import require_minimum_pandas_version
require_minimum_pandas_version()
except ImportError as e:
# If Pandas version requirement is not satisfied, skip related tests.
pandas_requirement_message = _exception_message(e)
pyarrow_requirement_message = None
try:
from pyspark.sql.pandas.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
except ImportError as e:
# If Arrow version requirement is not satisfied, skip related tests.
pyarrow_requirement_message = _exception_message(e)
test_not_compiled_message = None
try:
from pyspark.sql.utils import require_test_compiled
require_test_compiled()
except Exception as e:
test_not_compiled_message = _exception_message(e)
have_pandas = pandas_requirement_message is None
have_pyarrow = pyarrow_requirement_message is None
test_compiled = test_not_compiled_message is None
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate UDT in only Python
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class SQLTestUtils(object):
"""
    This util assumes the instance of this class has a 'spark' attribute, holding a spark session.
    It is usually used with the 'ReusedSQLTestCase' class, but it can be used on its own if you are
    sure that the implementing class has a 'spark' attribute.
"""
@contextmanager
def sql_conf(self, pairs):
"""
A convenient context manager to test some configuration specific logic. This sets
`value` to the configuration `key` and then restores it back when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
keys = pairs.keys()
new_values = pairs.values()
old_values = [self.spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
self.spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
self.spark.conf.unset(key)
else:
self.spark.conf.set(key, old_value)
@contextmanager
def database(self, *databases):
"""
        A convenient context manager to test with some specific databases. This drops the given
        databases if they exist and sets the current database to "default" when it exits.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for db in databases:
self.spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db)
self.spark.catalog.setCurrentDatabase("default")
@contextmanager
def table(self, *tables):
"""
A convenient context manager to test with some specific tables. This drops the given tables
        if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for t in tables:
self.spark.sql("DROP TABLE IF EXISTS %s" % t)
@contextmanager
def tempView(self, *views):
"""
A convenient context manager to test with some specific views. This drops the given views
        if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for v in views:
self.spark.catalog.dropTempView(v)
@contextmanager
def function(self, *functions):
"""
A convenient context manager to test with some specific functions. This drops the given
        functions if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for f in functions:
self.spark.sql("DROP FUNCTION IF EXISTS %s" % f)
class ReusedSQLTestCase(ReusedPySparkTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super(ReusedSQLTestCase, cls).setUpClass()
cls.spark = SparkSession(cls.sc)
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
super(ReusedSQLTestCase, cls).tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
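# Hedged usage sketch (an illustration, not part of the original module): how the
# SQLTestUtils.sql_conf context manager is typically exercised from a test; the
# config key and value below are assumptions chosen for demonstration.
def _example_sql_conf_usage(test_case):
    # `test_case` is assumed to be a ReusedSQLTestCase (anything with a `spark`
    # attribute and the SQLTestUtils mixin works).
    with test_case.sql_conf({"spark.sql.shuffle.partitions": "1"}):
        # The override is only visible inside the block and is restored on exit.
        assert test_case.spark.conf.get("spark.sql.shuffle.partitions") == "1"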
| apache-2.0 |
gcallah/Indra | setup.py | 1 | 1286 | from setuptools import setup, find_packages
setup(name='indras_net',
version='2.0.6',
description='A framework for agent-based modeling in Python.',
url='https://github.com/gcallah/indras_net.git',
author='Gene Callahan and Nathan Conroy',
author_email='[email protected]',
license='GNU',
zip_safe=False,
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*",
"tests"]),
install_requires=[
"networkx",
"numpy", 'propargs', 'matplotlib'
],
test_suite="",
entry_points={
"console_scripts": ['indra = indra.__main__:main']
},
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| gpl-3.0 |
gbrammer/grizli | grizli/galfit/deconvolve.py | 1 | 15242 | #!/usr/bin/env python
# encoding: utf-8
"""
deconvolve.py
GALFIT "deconvolution" as in Szomoru et al.
https://iopscience.iop.org/article/10.1088/2041-8205/714/2/L244
"""
import numpy as np
import astropy.io.fits as pyfits
import astropy.stats
import astropy.table
def test():
im = pyfits.open('/tmp/gf_out.fits')
model_hdu = im[2]
residuals = im[3].data
_x = galfit_deconvolve(model_hdu, residuals, rms=None)
model_hdu = _x['model']
residuals = _x['resid'].data
imshow_args = {'vmin': -5, 'vmax': 5, 'cmap': 'viridis'}
_ = galfit_deconvolve(model_hdu, residuals, rms=_x['rms'], imshow_args=imshow_args)
def galfit_deconvolve(model_hdu, residuals, rms=None, mask=None, oversample=8,
                      comp='1', xprof=np.append(0, np.arange(2, 20)),
                      y_func=np.mean, cumul_values=[0.5, 0.8], make_plot=True,
                      imshow_args={'vmin': -5, 'vmax': 5, 'cmap': 'viridis'},
                      plt_kwargs={'linestyle': 'steps-mid', 'color': 'r',
                                  'alpha': 0.8},
                      npix=20, psf_offset=[1, 1]):
"""
Deconvolve an image using a galfit Sersic model
`model_hdu` is the HDU output from GALFIT containing the model and model
parameters (e.g, out.fits[2]).
`residuals` are the fit residuals (out.fits[3].data)
`psf_offset` is the offset of the PSF relative to the central pixel of
the PSF array.
Returns:
R = radii evaluated at pixel positions for the ellipse parameters
phi = position angle around the ellipse, radians
prof = 2D array of unconvolved Sersic profile
params = Galfit params
tab = table of the 1D profile
fig = optional figure if `make_plot=True`
"""
import matplotlib.pyplot as plt
#model_hdu = _x['model']
#residuals = _x['resid']
_h = model_hdu.header
shape = residuals.shape
if mask is None:
mask = np.isfinite(residuals)
# Ellipse parameters and sersic profile
R, phi, prof, params = sersic_profile(shape, oversample=oversample,
gf_header=_h, comp=comp,
psf_offset=psf_offset)
so = np.argsort(R[mask].flatten())
Rso = R[mask].flatten()[so]
data = residuals + model_hdu.data
if '1_SKY' in _h:
sky = gf_header_key(_h, '1_SKY')
else:
sky = 0.
##########
# 1D averaged profiles
integ = False
# Original data
_, ydata, _, _ = running_median(Rso,
(data)[mask].flatten()[so].astype(np.float)-sky,
bins=xprof, use_median=False, y_func=y_func, integrate=integ)
# Convolved model
_, ymodel, _, _ = running_median(Rso,
(model_hdu.data)[mask].flatten()[so].astype(np.float),
bins=xprof, use_median=False, y_func=y_func, integrate=integ)
# Sersic profile
_, yprof, _, _ = running_median(Rso,
(prof)[mask].flatten()[so].astype(np.float),
bins=xprof, use_median=False, y_func=y_func, integrate=integ)
# Sersic + residuals
xm, ydeconv, ys, yn = running_median(Rso,
(prof + residuals)[mask].flatten()[so].astype(np.float),
bins=xprof, use_median=False, y_func=y_func, integrate=integ)
    # Sum for total normalization
_, ydeconv_sum, _, _ = running_median(Rso,
(prof + residuals)[mask].flatten()[so].astype(np.float),
bins=xprof, use_median=False, y_func=np.sum, integrate=False)
# Variance
if rms is not None:
xmx, yv, yvs, yn = running_median(Rso,
rms[mask].flatten()[so].astype(np.float)**2,
bins=xprof, use_median=False, y_func=np.sum)
yrms = np.sqrt(yv)/yn
im_norm = rms
# weighted
xmx, ynum_model, yvs, yn = running_median(Rso,
(model_hdu.data/rms**2)[mask].flatten()[so].astype(np.float),
bins=xprof, use_median=False, y_func=np.sum)
xmx, ynum, yvs, yn = running_median(Rso,
(data/rms**2)[mask].flatten()[so].astype(np.float),
bins=xprof, use_median=False, y_func=np.sum)
xmx, yden, yvs, yn = running_median(Rso,
(1./rms**2)[mask].flatten()[so].astype(np.float),
bins=xprof, use_median=False, y_func=np.sum)
yweight = ynum/yden
yweight_model = ynum_model/yden
yweight_err = 1./np.sqrt(yden)
else:
yrms = ys
im_norm = 1
yweight = None
dx = np.diff(xprof)
xpix = xprof[1:]-dx/2.
if False:
# Scale
yscl = 1.
#yscl = prof[msk].sum()/np.trapz(2*np.pi*yprof*xpix, xpix)
yprof *= yscl
ydeconv *= yscl
yrms *= yscl
# Interpolate Radii from the cumulative flux distribution
cumul_flux = np.cumsum((prof + residuals)[mask].flatten()[so])
#total = cumflux.max()
total = ydeconv_sum.sum()
Rcumul = np.interp(cumul_values, cumul_flux/total, Rso)
tab = astropy.table.Table()
tab['xpix'] = xpix
tab['yprofile'] = yprof
tab['ydeconv'] = ydeconv
tab['yrms'] = yrms
tab['ydata'] = ydata
tab['ymodel'] = ymodel
if yweight is not None:
tab['yweight'] = yweight
tab['yweight_model'] = yweight_model
tab['yweight_err'] = yweight_err
tab.meta['total_flux'] = total
for k in params:
tab.meta[k] = params[k]
for i, r in enumerate(cumul_values):
tab.meta['R{0}'.format(int(r*100))] = Rcumul[i]
params['Rcumul'] = Rcumul
if make_plot:
fig = plt.figure(figsize=(12, 3))
ax = fig.add_subplot(141)
ax2 = fig.add_subplot(142)
ax3 = fig.add_subplot(143)
ax4 = fig.add_subplot(144)
ax4.loglog()
ax4.grid()
xc = gf_header_key(_h, comp+'_XC')
yc = gf_header_key(_h, comp+'_YC')
ax.imshow(data/im_norm*mask, **imshow_args)
ax2.imshow(residuals/im_norm*mask, **imshow_args)
ax3.imshow(prof/im_norm*mask, **imshow_args)
label = 'Re={re:.2f}, n={n:.1f}'.format(**params)
pl = ax4.plot(xpix, yprof, label=label, **plt_kwargs)
ax4.errorbar(xpix, ydeconv, yrms, linestyle='None',
color=pl[0].get_color(), alpha=0.5, marker='.')
ax4.vlines(params['re'], 1e-10, 1e10, color=pl[0].get_color(), alpha=0.5)
ax4.vlines(Rcumul, 1e-10, 1e10, color=pl[0].get_color(), alpha=0.5, linewidth=3, linestyle='--')
ax4.scatter(Rso,
(prof + residuals)[mask].flatten()[so].astype(np.float)-sky,
marker='.', color='k', alpha=0.1, zorder=-1000)
# ax4.scatter(Rso,
# (data)[mask].flatten()[so].astype(np.float)-sky,
# marker='.', color='r', alpha=0.1, zorder=-1000)
ax4.legend()
valid = np.isfinite(ydeconv) & np.isfinite(yprof)
print(ydeconv[valid].min(), ydeconv[valid].max())
try:
ax4.set_ylim(np.maximum(0.5*yprof[valid].min(), 1e-5*ydeconv[valid].max()),
2*ydeconv[valid].max())
except:
pass
ax4.set_xlim(0.05, xprof.max()+2)
for a in [ax, ax2, ax3]:
a.set_xlim(xc-1-npix, xc-1+npix)
a.set_ylim(yc-1-npix, yc-1+npix)
a.set_xticklabels([])
a.set_yticklabels([])
# Show ellipses on images
a.plot(*r_ellipse(Rcumul[0], psf_offset=psf_offset, **params),
color='w', alpha=0.9, linewidth=2)
a.plot(*r_ellipse(xpix.max(), psf_offset=psf_offset, **params),
color='w', alpha=0.9, linewidth=2)
a.plot(*r_ellipse(Rcumul[0], psf_offset=psf_offset, **params),
color=pl[0].get_color(), alpha=0.5)
a.plot(*r_ellipse(xpix.max(), psf_offset=psf_offset, **params),
color=pl[0].get_color(), alpha=0.5, linestyle='--')
fig.tight_layout(pad=0.1)
else:
fig = None
return R, phi, prof, params, tab, fig
def get_kappa(n, **kwargs):
"""
Compute the Kappa parameter for a given n index as in the Galfit
definition
"""
from scipy.optimize import root
x0 = 2*n-0.33
args = (n)
k = root(kappa_func, x0, args=args, **kwargs)
return k.x[0]
def kappa_func(kappa, n):
"""
Function for getting Sersic kappa
"""
from scipy.special import gamma, gammainc
f = gamma(2*n)-2*gammainc(2*n, kappa)*gamma(2*n)
return f
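# Hedged numerical check (not in the original file): for a de Vaucouleurs profile
# (n = 4) the Sersic kappa should come out near the classic value of ~7.669, and
# kappa_func evaluated at the root should be ~0 (i.e. gammainc(2n, kappa) = 0.5).
def _example_check_kappa(n=4.0):
    kappa = get_kappa(n)
    residual = kappa_func(kappa, n)
    return kappa, residual  # expect roughly (7.669, 0.0) for n=4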
def Rc(c0):
"""
Shape parameter
"""
from scipy.special import beta
return np.pi*(c0+2)/(4*beta(1./(c0+2), 1+1./(c0+2)))
def sigma_e(re, n, q, Ftot=1., c0=0.):
"""
    Surface brightness at the effective radius, re, given the total flux
"""
from scipy.special import gamma
kap = get_kappa(n)
return Ftot/(2*np.pi*re**2*np.exp(kap)*n*kap**(-2*n)*gamma(2*n)*q/Rc(c0)), kap
def gf_header_key(header, key='2_XC'):
"""
Get a header keyword from a GALFIT header, which may have [] and *
in the keyword value
"""
return float(header[key].split()[0].strip('[]').strip('*').strip('{').strip('}'))
def sersic_profile(shape, mag=20., xc=[0., 0.], n=1., q=0.5, pa=0, re=1.,
                   ZP=26., gf_header=None, comp='2', verbose=True,
                   oversample=8, psf_offset=[1, 1]):
"""
Generate a Sersic profile with Galfit parameters within a defined image
shape.
Specify the parameters individually or provide a GALFIT model header
gf_header: FITS header of a GALFIT output model
comp: Number of the object in the GALFIT model
"""
import scipy.ndimage as nd
if gf_header is not None:
xx = gf_header_key(gf_header, comp+'_XC')
yy = gf_header_key(gf_header, comp+'_YC')
xc = np.array([xx, yy])
mag = gf_header_key(gf_header, comp+'_MAG')
if comp+'_N' in gf_header:
n = gf_header_key(gf_header, comp+'_N')
q = gf_header_key(gf_header, comp+'_AR')
pa = gf_header_key(gf_header, comp+'_PA')
re = gf_header_key(gf_header, comp+'_RE')
else:
n = 1.
q = 1.
pa = 0.
re = 0.01
if verbose:
print(f'xc:{xc}, q:{q}, pa:{pa}, n:{n}')
if 'MAGZPT' in gf_header:
ZP = gf_header['MAGZPT']
params = {'mag': mag, 'xc': xc, 're': re, 'n': n, 'q': q, 'pa': pa}
sigm, kap = sigma_e(re, n, q)
norm = sigm*10**(-0.4*(mag-ZP))
R, x, phi = pix_to_r(shape, verbose=verbose, psf_offset=psf_offset,
**params)
if oversample > 1:
Ro, x, phio = pix_to_r(shape, oversample=oversample, verbose=verbose,
psf_offset=psf_offset, **params)
sersic_large = norm*np.exp(-kap*((Ro/re)**(1./n)-1))
kern = np.ones((oversample, oversample))/oversample**2
sersic_profile = nd.convolve(sersic_large, kern)[oversample//2-1::oversample, oversample//2-1::oversample]
else:
sersic_profile = norm*np.exp(-kap*((R/re)**(1./n)-1))
return R, phi, sersic_profile, params
def r_ellipse(radius=5, xc=[0., 0.], q=0.5, pa=0, re=1., gf_header=None,
              comp='2', verbose=True, nstep=256, psf_offset=[1, 1], **kwargs):
"""
Make x, y coordinates given ellipse parameters
"""
if gf_header is not None:
xx = gf_header_key(gf_header, comp+'_XC')
yy = gf_header_key(gf_header, comp+'_YC')
xc = np.array([xx, yy])
mag = gf_header_key(gf_header, comp+'_MAG')
if comp+'_N' in gf_header:
n = gf_header_key(gf_header, comp+'_N')
q = gf_header_key(gf_header, comp+'_AR')
pa = gf_header_key(gf_header, comp+'_PA')
re = gf_header_key(gf_header, comp+'_RE')
else:
n = 1.
q = 1.
pa = 0.
re = 0.01
if verbose:
print(f'xc:{xc}, q:{q}, pa:{pa}')
phi = np.linspace(0, 2*np.pi, nstep)
xp = np.array([np.cos(phi), q*np.sin(phi)]).T*radius
theta = -(np.pi/2 + pa/180*np.pi) # + np.pi
_rot = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
x0 = xp.dot(_rot) + np.atleast_1d(xc)
xell, yell = (x0 - 1 - np.atleast_1d(psf_offset)).T
return xell, yell
def pix_to_r(shape, xc=[0., 0.], q=0.5, pa=0, re=1., gf_header=None, comp='2',
             verbose=True, oversample=1, psf_offset=[1, 1], **kwargs):
"""
Return an array of ellipse radii for a pixel grid and ellipse parameters
"""
if oversample > 1:
xarr = np.arange(-0.5, shape[1]-0.5, 1/oversample)
yarr = np.arange(-0.5, shape[0]-0.5, 1/oversample)
else:
xarr = np.arange(0, shape[1], 1/oversample)
yarr = np.arange(0, shape[0], 1/oversample)
x0 = np.array(np.meshgrid(xarr, yarr)).reshape((2, -1)).T + 1
x0 += np.atleast_1d(psf_offset)
if gf_header is not None:
xx = gf_header_key(gf_header, comp+'_XC')
yy = gf_header_key(gf_header, comp+'_YC')
xc = np.array([xx, yy])
mag = gf_header_key(gf_header, comp+'_MAG')
if comp+'_N' in gf_header:
n = gf_header_key(gf_header, comp+'_N')
q = gf_header_key(gf_header, comp+'_AR')
pa = gf_header_key(gf_header, comp+'_PA')
re = gf_header_key(gf_header, comp+'_RE')
else:
n = 1.
q = 1.
pa = 0.
re = 0.01
if verbose:
print(f'xc:{xc}, q:{q}, pa:{pa}')
theta = (np.pi/2 + pa/180*np.pi) # + np.pi
_rot = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
xp = (x0 - np.atleast_1d(xc)).dot(_rot)
outshape = [s*oversample for s in shape]
R = np.sqrt(((xp/np.array([1, q]))**2).sum(axis=1)).reshape(outshape)
phi = np.arctan2(xp[:, 1], xp[:, 0]).reshape(outshape)
return R, re, phi
def running_median(xi, yi, NBIN=10, use_median=True, use_nmad=True,
                   reverse=False, bins=None,
                   x_func=astropy.stats.biweight_location,
                   y_func=astropy.stats.biweight_location,
                   std_func=astropy.stats.biweight_midvariance,
                   integrate=False):
"""
Running median/biweight/nmad
"""
NPER = xi.size // NBIN
if bins is None:
so = np.argsort(xi)
if reverse:
so = so[::-1]
bx = np.linspace(0, len(xi), NBIN+1)
bins = np.interp(bx, np.arange(len(xi)), xi[so])
if reverse:
bins = bins[::-1]
NBIN = len(bins)-1
xm = np.arange(NBIN)*1.
xs = xm*0
ym = xm*0
ys = xm*0
N = np.arange(NBIN)
if use_median:
y_func = np.median
if use_nmad:
std_func = astropy.stats.mad_std
for i in range(NBIN):
in_bin = (xi > bins[i]) & (xi <= bins[i+1])
N[i] = in_bin.sum() # N[i] = xi[so][idx+NPER*i].size
if integrate:
xso = np.argsort(xi[in_bin])
ma = xi[in_bin].max()
mi = xi[in_bin].min()
xm[i] = (ma+mi)/2.
dx = (ma-mi)
ym[i] = np.trapz(yi[in_bin][xso], xi[in_bin][xso])/dx
else:
xm[i] = x_func(xi[in_bin])
ym[i] = y_func(yi[in_bin])
ys[i] = std_func(yi[in_bin])
return xm, ym, ys, N
| mit |
abhi007tyagi/JARVIS | Python/JarvisBrain/trainer.py | 1 | 5819 | import nltk
from nltk.tokenize import word_tokenize
import random
from nltk.classify.scikitlearn import SklearnClassifier
import pickle
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
print("Reading training corpus...")
welcome = open("raw/welcome.txt", "r").read()
command = open("raw/commands.txt", "r").read()
maths = open("raw/maths.txt", "r").read()
random_things = open("raw/random.txt", "r").read()
print("Building documents and words...")
all_words = []
documents = []
# J = adjective, R = adverb, V = verb, N = noun, CD = cardinal number (Penn Treebank tag prefixes)
allowed_word_types = ["J", "R", "V", "N", "CD"]
for wel in welcome.split("\n"):
documents.append((wel, "wel"))
words = word_tokenize(wel)
pos = nltk.pos_tag(words)
for w in pos:
if w[1][0] in allowed_word_types:
all_words.append(w[0].lower())
for cmd in command.split("\n"):
documents.append((cmd, "cmd"))
words = word_tokenize(cmd)
pos = nltk.pos_tag(words)
for w in pos:
if w[1][0] in allowed_word_types:
all_words.append(w[0].lower())
for mth in maths.split("\n"):
documents.append((mth, "mth"))
words = word_tokenize(mth)
pos = nltk.pos_tag(words)
for w in pos:
if w[1][0] in allowed_word_types:
all_words.append(w[0].lower())
# for rand in random_things.split("\n"):
# documents.append((rand, "rand"))
# words = word_tokenize(rand)
# pos = nltk.pos_tag(words)
# for w in pos:
# if w[1][0] in allowed_word_types:
# all_words.append(w[0].lower())
print("Saving documents...")
save_documents = open("pickled/documents.pickle", "wb")
pickle.dump(documents, save_documents)
save_documents.close()
all_words = nltk.FreqDist(all_words)
word_features = list(all_words.keys())
print("Saving words...")
save_word_features = open("pickled/word_features.pickle", "wb")
pickle.dump(word_features, save_word_features)
save_word_features.close()
def find_features(document):
words = word_tokenize(document)
features = {}
for w in word_features:
features[w] = (w in words)
return features
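# Illustrative helper (an addition for clarity, not in the original script): show which
# known words actually fire for a given utterance; handy for debugging why a phrase
# ends up classified as "wel", "cmd" or "mth". The sample sentence is an assumption.
def preview_features(sentence="hello jarvis open the calculator", limit=10):
    fired = [w for w, present in find_features(sentence).items() if present]
    return fired[:limit]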
featuresets = [(find_features(rev), category) for (rev, category) in documents]
random.shuffle(featuresets)
random.shuffle(featuresets)
random.shuffle(featuresets)
random.shuffle(featuresets)
save_featureset = open("pickled/featuresets.pickle", "wb")
pickle.dump(featuresets, save_featureset)
save_featureset.close()
# print(featuresets)
print("Feature-set created of length ->", len(featuresets))
training_set = featuresets[:180]
testing_set = featuresets[180:]
print("Starting training different algorithms...")
orig_classifier = nltk.NaiveBayesClassifier.train(training_set)
print("Original_classifier Accuracy ->", (nltk.classify.accuracy(orig_classifier, testing_set)) * 100)
# orig_classifier.show_most_informative_features()
save_classifier = open("pickled/originalnaivebayes.pickle", "wb")
pickle.dump(orig_classifier, save_classifier)
save_classifier.close()
## Different classifiers are used at the moment. During code optimization, only the required classifier/s will be used.
MNB_classifier = SklearnClassifier(MultinomialNB())
MNB_classifier.train(training_set)
print("MNB_classifier Accuracy ->", (nltk.classify.accuracy(MNB_classifier, testing_set)) * 100)
save_classifier = open("pickled/MNB_classifier.pickle", "wb")
pickle.dump(MNB_classifier, save_classifier)
save_classifier.close()
BernoulliNB_classifier = SklearnClassifier(BernoulliNB())
BernoulliNB_classifier.train(training_set)
print("BernoulliNB_classifier Accuracy ->", (nltk.classify.accuracy(BernoulliNB_classifier, testing_set)) * 100)
save_classifier = open("pickled/BernoulliNB_classifier.pickle", "wb")
pickle.dump(BernoulliNB_classifier, save_classifier)
save_classifier.close()
LogisticRegression_classifier = SklearnClassifier(LogisticRegression())
LogisticRegression_classifier.train(training_set)
print("LogisticRegression_classifier Accuracy ->",
(nltk.classify.accuracy(LogisticRegression_classifier, testing_set)) * 100)
save_classifier = open("pickled/LogisticRegression_classifier.pickle", "wb")
pickle.dump(LogisticRegression_classifier, save_classifier)
save_classifier.close()
SGDClassifier_classifier = SklearnClassifier(SGDClassifier())
SGDClassifier_classifier.train(training_set)
print("SGDClassifier_classifier Accuracy ->", (nltk.classify.accuracy(SGDClassifier_classifier, testing_set)) * 100)
save_classifier = open("pickled/SGDC_classifier.pickle", "wb")
pickle.dump(SGDClassifier_classifier, save_classifier)
save_classifier.close()
# SVC_classifier = SklearnClassifier(SVC())
# SVC_classifier.train(training_set)
# print("SVC_classifier Accuracy ->", (nltk.classify.accuracy(SVC_classifier, testing_set))*100)
LinearSVC_classifier = SklearnClassifier(LinearSVC())
LinearSVC_classifier.train(training_set)
print("LinearSVC_classifier Accuracy ->", (nltk.classify.accuracy(LinearSVC_classifier, testing_set)) * 100)
save_classifier = open("pickled/LinearSVC_classifier.pickle", "wb")
pickle.dump(LinearSVC_classifier, save_classifier)
save_classifier.close()
NuSVC_classifier = SklearnClassifier(NuSVC())
NuSVC_classifier.train(training_set)
print("NuSVC_classifier Accuracy ->", (nltk.classify.accuracy(NuSVC_classifier, testing_set)) * 100)
save_classifier = open("pickled/NuSVC_classifier.pickle", "wb")
pickle.dump(NuSVC_classifier, save_classifier)
save_classifier.close()
#
# # custom_classifier = CustomClassifier(SGDClassifier, LinearSVC_classifier, LogisticRegression_classifier,
# # BernoulliNB_classifier, MNB_classifier)
# # print("Custom_classifier Accuracy ->", (nltk.classify.accuracy(custom_classifier, testing_set)) * 100)
| apache-2.0 |
DGrady/pandas | pandas/tests/io/parser/python_parser_only.py | 9 | 8017 | # -*- coding: utf-8 -*-
"""
Tests that apply specifically to the Python parser. Unless specifically
stated as a Python-specific issue, the goal is to eventually move as many of
these tests out of this module as soon as the C parser can accept further
arguments when parsing.
"""
import csv
import pytest
import pandas.util.testing as tm
from pandas import DataFrame, Index
from pandas import compat
from pandas.errors import ParserError
from pandas.compat import StringIO, BytesIO, u
class PythonParserTests(object):
def test_invalid_skipfooter(self):
text = "a\n1\n2"
# see gh-15925 (comment)
msg = "skipfooter must be an integer"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(text), skipfooter="foo")
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(text), skipfooter=1.5)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(text), skipfooter=True)
msg = "skipfooter cannot be negative"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(text), skipfooter=-1)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
tm.assert_index_equal(data.index,
Index(['foo', 'bar', 'baz'], name='index'))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_BytesIO_input(self):
if not compat.PY3:
pytest.skip(
"Bytes-related test - only needs to work on Python 3")
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = self.read_table(data, sep="::", encoding='cp1255')
expected = DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_single_line(self):
# see gh-6607: sniff separator
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
def test_skipfooter(self):
# see gh-6607
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_decompression_regex_sep(self):
# see gh-6607
try:
import gzip
import bz2
except ImportError:
pytest.skip('need gzip and bz2 to run')
with open(self.csv1, 'rb') as f:
data = f.read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
pytest.raises(ValueError, self.read_csv,
path, compression='bz3')
def test_read_table_buglet_4x_multiindex(self):
# see gh-6607
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = self.read_table(StringIO(text), sep=r'\s+')
assert df.index.names == ('one', 'two', 'three', 'four')
# see gh-6893
data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9'
expected = DataFrame.from_records(
[(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
actual = self.read_table(StringIO(data), sep=r'\s+')
tm.assert_frame_equal(actual, expected)
def test_skipfooter_with_decimal(self):
# see gh-6971
data = '1#2\n3#4'
expected = DataFrame({'a': [1.2, 3.4]})
result = self.read_csv(StringIO(data), names=['a'],
decimal='#')
tm.assert_frame_equal(result, expected)
# the stray footer line should not mess with the
        # casting of the first two lines if we skip it
data = data + '\nFooter'
result = self.read_csv(StringIO(data), names=['a'],
decimal='#', skipfooter=1)
tm.assert_frame_equal(result, expected)
def test_encoding_non_utf8_multichar_sep(self):
# see gh-3404
expected = DataFrame({'a': [1], 'b': [2]})
for sep in ['::', '#####', '!!!', '123', '#1!c5',
'%!c!d', '@@#4:2', '_!pd#_']:
data = '1' + sep + '2'
for encoding in ['utf-16', 'utf-16-be', 'utf-16-le',
'utf-32', 'cp037']:
encoded_data = data.encode(encoding)
result = self.read_csv(BytesIO(encoded_data),
sep=sep, names=['a', 'b'],
encoding=encoding)
tm.assert_frame_equal(result, expected)
def test_multi_char_sep_quotes(self):
# see gh-13374
data = 'a,,b\n1,,a\n2,,"2,,b"'
msg = 'ignored when a multi-char delimiter is used'
with tm.assert_raises_regex(ParserError, msg):
self.read_csv(StringIO(data), sep=',,')
# We expect no match, so there should be an assertion
# error out of the inner context manager.
with pytest.raises(AssertionError):
with tm.assert_raises_regex(ParserError, msg):
self.read_csv(StringIO(data), sep=',,',
quoting=csv.QUOTE_NONE)
def test_skipfooter_bad_row(self):
# see gh-13879
# see gh-15910
msg = 'parsing errors in the skipped footer rows'
for data in ('a\n1\n"b"a',
'a,b,c\ncat,foo,bar\ndog,foo,"baz'):
with tm.assert_raises_regex(ParserError, msg):
self.read_csv(StringIO(data), skipfooter=1)
# We expect no match, so there should be an assertion
# error out of the inner context manager.
with pytest.raises(AssertionError):
with tm.assert_raises_regex(ParserError, msg):
self.read_csv(StringIO(data))
| bsd-3-clause |
andykimpe/chromium-test-npapi | chrome/browser/nacl_host/test/gdb_rsp.py | 99 | 2431 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is based on gdb_rsp.py file from NaCl repository.
import re
import socket
import time
def RspChecksum(data):
checksum = 0
for char in data:
checksum = (checksum + ord(char)) % 0x100
return checksum
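# Illustrative sketch (not part of the original helper): an RSP packet frames its body
# as '$<body>#<checksum>', where the checksum is the byte sum of the body modulo 256.
# For example, RspChecksum('qSupported') evaluates to 0x37, so the framed packet
# would be '$qSupported#37'.
def _example_frame_packet(body='qSupported'):
    return '$%s#%02x' % (body, RspChecksum(body))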
class GdbRspConnection(object):
def __init__(self, addr):
self._socket = self._Connect(addr)
def _Connect(self, addr):
# We have to poll because we do not know when sel_ldr has
# successfully done bind() on the TCP port. This is inherently
# unreliable.
# TODO(mseaborn): Add a more reliable connection mechanism to
# sel_ldr's debug stub.
timeout_in_seconds = 10
poll_time_in_seconds = 0.1
for i in xrange(int(timeout_in_seconds / poll_time_in_seconds)):
# On Mac OS X, we have to create a new socket FD for each retry.
sock = socket.socket()
try:
sock.connect(addr)
except socket.error:
# Retry after a delay.
time.sleep(poll_time_in_seconds)
else:
return sock
raise Exception('Could not connect to sel_ldr\'s debug stub in %i seconds'
% timeout_in_seconds)
def _GetReply(self):
reply = ''
while True:
data = self._socket.recv(1024)
if len(data) == 0:
raise AssertionError('EOF on socket reached with '
'incomplete reply message: %r' % reply)
reply += data
if '#' in data:
break
match = re.match('\+\$([^#]*)#([0-9a-fA-F]{2})$', reply)
if match is None:
raise AssertionError('Unexpected reply message: %r' % reply)
reply_body = match.group(1)
checksum = match.group(2)
expected_checksum = '%02x' % RspChecksum(reply_body)
if checksum != expected_checksum:
raise AssertionError('Bad RSP checksum: %r != %r' %
(checksum, expected_checksum))
# Send acknowledgement.
self._socket.send('+')
return reply_body
# Send an rsp message, but don't wait for or expect a reply.
def RspSendOnly(self, data):
msg = '$%s#%02x' % (data, RspChecksum(data))
return self._socket.send(msg)
def RspRequest(self, data):
self.RspSendOnly(data)
return self._GetReply()
def RspInterrupt(self):
self._socket.send('\x03')
return self._GetReply()
| bsd-3-clause |
icdishb/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for the average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
trhongbinwang/data_science_journey | python/pandas/pandas_summary.py | 1 | 7928 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 7 15:55:37 2016
pandas summary
http://pandas.pydata.org/pandas-docs/stable/basics.html
you can search this page to find how-tos
0 or ‘index’ for row-wise, 1 or ‘columns’ for column-wise
"""
import pickle
import pandas as pd
import numpy as np
# used in notebook
import matplotlib.pyplot as plt
%matplotlib inline
#.iloc() -- position index; .loc() -- label index.
# general info
df.head(), df.info()
# read excel data from a folder
from os import listdir
from os.path import isfile, join
# on Windows, change path separators from \ to /
mypath = 'C://Documents/data//'
fileNames = [f for f in listdir(mypath) if isfile(join(mypath, f))]
df_all = pd.DataFrame() # empty df
for fileName in fileNames:
fileName = join(mypath, fileName)
df = pd.read_excel(fileName)
print(df.shape)
df_all = df_all.append(df)
print(df_all.shape)
print(df_all.info())
# write into a excel file
writer = pd.ExcelWriter('historical.xlsx')
df.to_excel(writer, 'fx')
writer.save()
# dump or load pickle
with open('historical.pkl', 'wb') as f:
pickle.dump(df_all, f)
df_loaded = pickle.load(open('historical.pkl', 'rb'))
# pd select columns
df_all = df_all[['level', 'name']]
# pd filtering
df = df[df['st'] != '-']
df = df[df['tr'] > 60]
# convert time from string to datetime
df['time']= pd.to_datetime(df['time'], infer_datetime_format=True)
# filtering the time
df_past = df[df['happen_time'] < datetime.datetime(2016,6,1,0,0,0)]
# pd sort by a column value
df = df.sort_values(by='st')
# format print
print('number of action = {0}'.format(len(df)))
# iterate dataframe
for i in range(1,len(df)):
    if df.iloc[i]['name'] == df.iloc[i-1]['name']:
        pass  # chained (two-step) indexing returns a copy, so it cannot be used to assign values
# generate new collumn from existing columns
df['tr'] = (df['st1'] - df['st']).apply(lambda x: x.total_seconds())
df['source'] = df['s1'] + ' / ' + df['s2']
# drop na and duplicates
df = df.drop_duplicates(['name', 'tv'])
df = df.dropna(subset = ['name'])
# convert a DataFrame into a numpy array
df.values
# another way to iterate pd row by row
for row in df.itertuples(index=False):
print(row) # row is a namedtuple
# group data using column
grouped = df.groupby('source')
for name, group in grouped:
# set value to an element in df
df.iloc[i, df.columns.get_loc('ro')] = 'de'
# drop columns from df
df = df.drop(['level', 'tr'], axis=1)
# pd concat or append
df = pd.concat([df, df_latest], axis=0)
# create pandas df from list of tuple, also can from a list or a dict
name_fre = [(name, len(group)) for name, group in grouped]
name_fre_df = pd.DataFrame(name_fre, columns=['name', 'Freq'])
# double condition selection
small_alarms = df[(name_fre_df['Frequency']>10) & (name_fre_df['Frequency']<100)]['name'].values.tolist()
# only select value from a list
df_large = df[df['name'].isin(large)]
# pd one-hot encoding of categorical data; better to use sklearn or write your own
hour_dummies = pd.get_dummies(ml_df['hour'],prefix='hour')
# apply func to a column
ip_head = ip_head.apply(lambda x: x.split('.')[0])
# add a column in pd
data['price'] = 0
# delete a column
del data['price']
# rename a column
data = data.rename(columns={'NAME':'PLANET'})
# The column names, as an Index object
print( df.columns )
# convert to list
list(df.columns)
# view pd's column in two styles
df.beer_style  # 1. attribute style
df['beer_style'] # 2. dict style
# string operation and selection
boolean_mask = df.beer_style.str.contains('[A|a]merican')
df.beer_style[boolean_mask]
# Statistical Operations through Indexing
beer_ids = df.beer_id.value_counts(sort=True) # count frequency of each id
# iloc access
# row, column
df.iloc[[2,5,10],0:3]
# lambda function (anonymous function)
f = lambda x: x == 0
# 3 steps
grouped = data.groupby(lambda x: x.year) #1. group
grouped.get_group(1997) #2. split
gr = df.groupby('beer_style') # gr is a groupby object
gr.agg('mean') # get some statistics from each group
# only a subset
review_columns = ['abv','review_overall','review_appearance',
'review_palate','review_taste']
gr[review_columns].agg('mean')
gr['review_aroma'].agg([np.mean, np.std, 'count']) # multiple aggregation
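# a further sketch (assumption): per-column aggregation with a dict mapping column -> function(s)
gr.agg({'abv': 'mean', 'review_overall': ['mean', 'std']})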
#
#Attributes and the raw ndarray(s)
# shape; columns and index
df.columns = [x.lower() for x in df.columns]
# Flexible binary operations
row = df.ix[1]
column = df['two']
df.sub(row, axis='columns')
df + df2
df.add(df2, fill_value=0)
df.gt(df2)
# Descriptive statistics; DataFrame: “index” (axis=0, default), “columns” (axis=1)
df.mean(0)
df.sum(0, skipna=False)
# normalize column
ts_stand = (df - df.mean()) / df.std()
# normalize each row
xs_stand = df.sub(df.mean(1), axis=0).div(df.std(1), axis=0)
# index of min and max
df1.idxmin(axis=0)
df1.idxmax(axis=1)
# Value counts (histogramming)
s = pd.Series(np.random.randint(0, 7, size=50))
s.value_counts()
# Row or Column-wise Function Application
df.apply(np.mean)
df.apply(np.mean, axis=1)
# can pass additional parameters to function
# Applying elementwise Python functions
# applymap() for df; map() for Series (column); func takes a single value and output a single value
f = lambda x: len(str(x))
df4['one'].map(f)
df4.applymap(f)
# iteration
for i in object:
# Series, value
# dataframe, column name
for col in df:
print(col)
# itertuples is a lot faster than iterrows
# itertuples return each row as a namedtuple
# a namedtuple is as row = (Index=0, a=1, b='a'). can be access as: row.a
# .dt for Series
s = pd.Series(pd.date_range('20130101 09:10:12', periods=4))
s.dt.day
s.dt.hour
s.dt.second
s[s.dt.day==2]
s.dt.strftime('%Y/%m/%d') # convert datetime to string
# pd.date_range to generate data range
# Vectorized string methods - Series; exclude missing/NA values automatically
s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'])
s.str.lower()
# clean up the columns; chaining because all method return a Series
df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_')
# Sorting
#sort by index
unsorted_df.sort_index()
# sort by value
df1 = pd.DataFrame({'one':[2,1,1,1],'two':[1,3,2,4],'three':[5,4,3,2]})
df1[['one', 'two', 'three']].sort_values(by=['one','two'])
# smallest / largest values
s = pd.Series(np.random.permutation(10))
s.nsmallest(3)
df = pd.DataFrame({'a': [-2, -1, 1, 10, 8, 11, -1],
'b': list('abdceff')})
df.nlargest(3, 'a')
# series.nlargest
s = pd.Series(np.random.rand(100))
s.nlargest(10)
# dtypes of each columns
dft.dtypes
#astype
dft = pd.DataFrame({'a': [1,2,3], 'b': [4,5,6], 'c': [7, 8, 9]})
dft[['a','b']] = dft[['a','b']].astype(np.uint8)
#pd.to_datetime()
#to_numeric()
df.apply(pd.to_datetime)
# Method chaining; works because each method returns a df.
def read(fp):
df = (pd.read_csv(fp)
.rename(columns=str.lower)
.drop('unnamed: 36', axis=1)
.pipe(extract_city_name)
.pipe(time_to_datetime, ['dep_time', 'arr_time', 'crs_arr_time', 'crs_dep_time'])
.assign(fl_date=lambda x: pd.to_datetime(x['fl_date']),
dest=lambda x: pd.Categorical(x['dest']),
origin=lambda x: pd.Categorical(x['origin']),
tail_num=lambda x: pd.Categorical(x['tail_num']),
unique_carrier=lambda x: pd.Categorical(x['unique_carrier']),
cancellation_code=lambda x: pd.Categorical(x['cancellation_code'])))
return df
# pipe your own function
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
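# sketch of a pipe-able helper (assumption: the airline column names are illustrative);
# any function that takes a DataFrame as its first argument and returns a DataFrame
# chains cleanly with .pipe(), e.g. the extract_city_name step used in read() above
def extract_city_name(df):
    df = df.copy()
    df['origin_city'] = df['origin_city_name'].str.split(',').str[0]
    return df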
| apache-2.0 |
Fireblend/scikit-learn | sklearn/neighbors/classification.py | 132 | 14388 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right: fancy indexing with repeated
# class indices would not accumulate the votes, so loop over neighbor columns
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
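# Illustrative worked example of the weighted vote above (added sketch, not
# part of the original source): for a query whose three neighbors carry the
# labels [0, 0, 1] with distance-based weights [0.5, 0.3, 0.2], class 0
# accumulates 0.8 and class 1 accumulates 0.2; after dividing by the row sum
# (1.0), predict_proba reports [0.8, 0.2] for that query.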
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label assigned to outlier samples (samples with no neighbors
within the given radius).
If set to None, a ValueError is raised when an outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
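# Example of the outlier handling above (added sketch, not part of the
# original source): with outlier_label=-1, a query point that has no
# neighbors within `radius` is simply labeled -1, whereas with the default
# outlier_label=None the same query raises the ValueError shown above.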
| bsd-3-clause |
KyleKing/My-Programming-Sketchbook | Python/BIOE232-Thermodynamics/HW19/BIOE232_HW19.py | 1 | 1394 | #' % HW19 - BIOE232
#' % Kyle King
#' % May 7, 2015
#' My First Python Script!
# For outputting nice HTML file/PDF
# import pweave
import pprint as pp
# All the essentials
import numpy as np # np.__version__
import matplotlib.pyplot as plt
import math
from decimal import *
from pylab import *
#' Question 1
# Declare variables
Ka = 5.1E-7
pKa = -math.log10(Ka)
pH = np.linspace(0, 14, num=140)
ratio = 1/(1 + pow(10, (pKa - pH)))
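# (Added note) This is the Henderson-Hasselbalch fraction of the deprotonated
# form: C_b / C_total = 1 / (1 + 10**(pKa - pH)).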
# Print out constants
pp.pprint(['Ka = ', Ka])
pp.pprint(['pKa = ', pKa])
# Plot function
plot(pH, ratio)
title('HW19: Q1 The ratio of C_b / C_total versus pH')
xlabel('pH')
ylabel('Ratio of C_b / C_total')
grid(True)
# show()
#' Question 2
# Declare variables
pKa1, pKa2 = 6.3, 10.8
pH = np.linspace(0, 14, num=140)
Dratio1 = 1/(1 + pow(10, (pKa1 - pH)))
Dratio2 = 1/(1 + pow(10, (pKa2 - pH)))
# Print out constants
# pp.pprint(['Dratio1 = ', Dratio1])
# pp.pprint(['Dratio2 = ', Dratio2])
# Plot function
fig = plt.figure()
ax1 = fig.add_axes([0.1, 0.1, 0.8, 0.7])
l1, l2 = ax1.plot(pH, Dratio1, '-*', pH, Dratio2)
fig.legend((l1, l2), ('Ajmalicine', 'Serpentine'), 'upper right')
title('HW19: Q2 The ratio of D_overall / D versus pH')
xlabel('pH')
ylabel('Ratio of D_overall / D')
grid(True)
# plt.show()
#' Question 3
pKa, pHv, pHc = 6.3, 3, 7
conc = (1+pow(10, pKa - pHv)) / (1+pow(10, pKa - pHc))
pp.pprint(['Concentration Ability = ', conc])
| mit |
mynlp/ccg2lambda | scripts/evaluate.py | 1 | 12193 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright 2015 Pascual Martinez-Gomez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import codecs
from collections import Counter
import logging
from lxml import etree
import os
import sys
import textwrap
from tqdm import tqdm
from pandas_ml import ConfusionMatrix
from visualization_tools import convert_doc_to_mathml
from visualization_tools import wrap_mathml_in_html
def load_files(proof_fnames):
"""
From a list of XML filenames that contain a <proof> node,
it returns a list of lxml root nodes.
"""
roots = []
parser = etree.XMLParser(remove_blank_text=True)
for fname in proof_fnames:
docs = etree.parse(fname, parser)
roots.append(docs)
return roots
possible_inference_results = set(['yes', 'no', 'unknown'])
def select_result(inference_results):
"""
inference_results is a list with elements {'yes', 'no', 'unknown'}.
"""
if not inference_results:
return 'unknown'
for r in inference_results:
assert r in possible_inference_results, '{0} not in {1}'.format(r, possible_inference_results)
if r != 'unknown':
return r
return 'unknown'
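# Doctest-style sketch of the selection rule above (added, not part of the
# original file): the first non-'unknown' result wins, otherwise 'unknown'.
# >>> select_result(['unknown', 'yes', 'unknown'])
# 'yes'
# >>> select_result([])
# 'unknown'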
def get_sys_labels(roots):
labels = dict()
for root in roots:
for doc in root.xpath('./document'):
problem_id = doc.get('pair_id', None)
if problem_id is not None and labels.get(problem_id, None) not in ['yes', 'no']:
inference_results = doc.xpath('./proof/@inference_result')
labels[problem_id] = select_result(inference_results)
return labels
def get_gold_labels(roots):
labels = dict()
for root in roots:
for doc in root.xpath('./document'):
problem_id = doc.get('pair_id', None)
rte_label = doc.get('rte_label', None)
if problem_id is not None and rte_label is not None:
if problem_id in labels and labels[problem_id] != rte_label:
logging.warning(
'problem_id {0} with different rte_label: {1} vs {2}'.format(
problem_id, labels[problem_id], rte_label))
else:
labels[problem_id] = rte_label
return labels
def print_accuracy(gold_labels, sys_labels):
if len(gold_labels) != len(sys_labels):
logging.warning(
'In computing accuracy, the number of gold and system labels differs: g{0} vs s{1}.'.format(
len(gold_labels), len(sys_labels)))
hits = 0
for prob_id, gold_label in gold_labels.items():
if sys_labels.get(prob_id, 'unknown') == gold_label:
hits += 1
accuracy = float(hits) / len(gold_labels)
print('Accuracy: {0:.4f} ({1}/{2})'.format(accuracy, hits, len(gold_labels)))
def print_label_distribution(labels, title=''):
c = Counter(labels.values())
print('Label Distribution {0}: {1}'.format(title.rjust(5), c))
def print_confusion_matrix(gold_id_labels, sys_id_labels):
gold_ids = gold_id_labels.keys()
gold_labels = [gold_id_labels[i] for i in gold_ids]
sys_labels = [sys_id_labels.get(i, 'unknown') for i in gold_ids]
c = ConfusionMatrix(gold_labels, sys_labels)
print('Confusion matrix:\n{0}'.format(c))
true_positives = c.get('yes', 'yes') + c.get('no', 'no')
true_negatives = c.get('unknown', 'unknown')
false_positives = c.get('unknown', 'yes') + c.get('unknown', 'no') + c.get('no', 'yes') + c.get('yes', 'no')
false_negatives = c.get('yes', 'unknown') + c.get('no', 'unknown')
print('Precision : {0:.4f}'.format(
float(true_positives) / (true_positives + false_positives)))
print('Recall : {0:.4f}'.format(
float(true_positives) / (true_positives + false_negatives)))
print('True positives : {0}'.format(true_positives))
print('True negatives : {0}'.format(true_negatives))
print('False positives: {0}'.format(false_positives))
print('False negatives: {0}'.format(false_negatives))
def print_num_syntactic_errors(roots):
"""
Syntactic parse errors are likely to be signaled by sentence XML nodes
for which there is no 'tokens' node (failure of syntactic parser
earlier in the pipeline).
"""
syn_errors = [s for root in roots for s in root.xpath(
'./document/sentences/sentence') if not s.xpath('./tokens')]
print('Syntactic parse errors: {0}'.format(len(syn_errors)))
def print_num_semantic_errors(roots):
sem_errors = [se for root in roots for se in root.xpath(
'./document/sentences/sentence/semantics[@status="failed"]')]
sem_syn_errors = [se for se in sem_errors if not se.getparent().xpath('./tokens')]
print('Semantic parse errors: {0} (from which {1} are syntactic errors)'.format(
len(sem_errors), len(sem_syn_errors)))
def print_proof_status_stats(roots):
statuses = [s for root in roots for s in root.xpath('./document/proof/@status')]
c = Counter(statuses)
print('Proof status distribution: {0}'.format(c))
def get_problems(roots, error='false_positives'):
if error == 'false_positives':
cond = '@rte_label = "unknown" and ./proof/@inference_result != "unknown"'
elif error == 'false_negatives':
cond = '@rte_label != "unknown" and ./proof/@inference_result = "unknown"'
elif error == 'true_positives':
cond = '@rte_label != "unknown" and ./proof/@inference_result = @rte_label'
elif error == 'true_negatives':
cond = '@rte_label = "unknown" and ./proof/@inference_result = @rte_label'
else:
return [p for root in roots for p in root.xpath('./document')]
problems = [p for root in roots for p in root.xpath('./document[{0}]'.format(cond))]
return problems
def get_open_formula(doc):
f = doc.xpath('./proof/theorems/theorem/failure_log[1]/@open_formula')
if len(f) == 0:
return 'no'
return f[0]
def get_type_error(doc):
f = doc.xpath('./proof/theorems/theorem/failure_log[1]/@type_error')
if len(f) == 0:
return 'no'
return f[0]
def print_stats_for(roots, error='false_positives'):
problems = get_problems(roots, error)
open_formulas = [get_open_formula(p) for p in problems]
type_errors = [get_type_error(p) for p in problems]
print('{0}: {1}'.format(error, len(problems)))
ct = Counter(type_errors)
print(' Type error distribution: {0}'.format(ct))
co = Counter(open_formulas)
print(' Open formula distribution: {0}'.format(co))
def make_html_header():
return (
"<!doctype html>\n"
"<html lang='en'>\n"
"<head>\n"
" <meta charset='UTF-8'>\n"
" <title>Evaluation results</title>\n"
" <style>\n"
" body {\n"
" font-size: 1.5em;\n"
" }\n"
" </style>\n"
"</head>\n"
"<body>\n"
"<table border='1'>\n"
"<tr>\n"
" <td>sick problem</td>\n"
" <td>gold answer</td>\n"
" <td>system answer</td>\n"
" <td>proving time</td>\n"
"</tr>\n")
def make_html_tail():
return '</table>\n</body>\n</html>'
def print_html_problem(doc, dir_name):
prob_id = doc.get('pair_id', '00000')
prob_html_fname = dir_name + '/' + prob_id + '.html'
if prob_id == '00000':
logging.warning(
'RTE problem ID unspecified. Overwriting ' + prob_html_fname)
coq_scripts = doc.xpath('./proof/theorems/theorem/coq_script/text()')
mathml_str = convert_doc_to_mathml(doc)
html_str = wrap_mathml_in_html(mathml_str)
with codecs.open(prob_html_fname, 'w', 'utf-8') as fout:
fout.write(html_str)
return
red_color="rgb(255,0,0)"
green_color="rgb(0,255,0)"
white_color="rgb(255,255,255)"
gray_color="rgb(136,136,136)"
def print_html_problems(problems, fname_base, dir_name):
html_head = make_html_header()
with codecs.open('{0}/{1}.html'.format(dir_name, fname_base), 'w', 'utf-8') as fout:
fout.write(html_head)
for p in tqdm(problems):
print_html_problem(p, dir_name)
gold_label = p.get('rte_label', 'None')
sys_label = p.xpath('./proof/@inference_result')[0]
if gold_label == 'unknown' and sys_label != 'unknown':
color = red_color # false positive
elif gold_label == sys_label:
color = green_color # true positive and true negative.
elif gold_label != 'unknown' and sys_label == 'unknown':
color = gray_color # false negative
else:
color = white_color
prob_id = p.get('pair_id', '00000')
prob_html_fname = prob_id + '.html'
proving_time = -1.0
html_str = (
'<tr>\n'
' <td><a style="background-color:{0};" href="{1}">{2}</a></td>\n'
' <td>{3}</td>\n'
' <td>{4}</td>\n'
' <td>{5}s</td>\n'
'</tr>\n').format(
color, prob_html_fname, prob_id, gold_label, sys_label, proving_time)
fout.write(html_str)
html_tail = make_html_tail()
fout.write(html_tail)
def print_html(roots, fname_base='main', dir_name='results'):
print('Creating HTML graphical output. Please be patient...')
problems = get_problems(roots, '')
print_html_problems(problems, fname_base + '_all', dir_name)
print('HTML graphical output written to {0}/{1}_all.html'.format(dir_name, fname_base))
def main(args = None):
DESCRIPTION=textwrap.dedent("""\
The XML input file proof should contain the gold and automatic inference results.
""")
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=DESCRIPTION)
parser.add_argument("proofs", nargs='+',
help="XML input filename(s) with proof results.")
parser.add_argument("--dir_name", nargs='?', type=str, default='',
help="Directory name where evaluation results will be stored.")
args = parser.parse_args()
logging.basicConfig(level=logging.WARNING)
if any(not os.path.exists(p) for p in args.proofs):
print('One of the files does not exists: {0}'.format(args.proofs),
file=sys.stderr)
parser.print_help(file=sys.stderr)
sys.exit(1)
proof_fnames = args.proofs
roots = load_files(proof_fnames)
gold_labels = get_gold_labels(roots)
sys_labels = get_sys_labels(roots)
print('Number of problems processed: {0}'.format(len(sys_labels)))
if gold_labels:
print_accuracy(gold_labels, sys_labels)
print_confusion_matrix(gold_labels, sys_labels)
print_label_distribution(gold_labels, 'gold')
print_label_distribution(sys_labels, 'sys')
print_stats_for(roots, 'false_positives')
print_stats_for(roots, 'false_negatives')
print_stats_for(roots, 'true_positives')
print_stats_for(roots, 'true_negatives')
else:
logging.warning('No gold RTE labels provided.')
print_num_syntactic_errors(roots)
print_num_semantic_errors(roots)
print_proof_status_stats(roots)
if args.dir_name:
if not os.path.exists(args.dir_name):
os.makedirs(args.dir_name)
print_html(roots, 'main', args.dir_name)
# fps = get_problems(roots, 'false_negatives')
# for fp in fps:
# print('{0} {1} {2}'.format(
# fp.get('pair_id'),
# fp.get('rte_label'),
# fp.xpath('./proof/@inference_result')[0]))
if __name__ == '__main__':
main()
| apache-2.0 |
daodaoliang/bokeh | examples/charts/file/scatter.py | 37 | 1607 |
from collections import OrderedDict
import pandas as pd
from bokeh.charts import Scatter, output_file, show, vplot
from bokeh.sampledata.iris import flowers
setosa = flowers[(flowers.species == "setosa")][["petal_length", "petal_width"]]
versicolor = flowers[(flowers.species == "versicolor")][["petal_length", "petal_width"]]
virginica = flowers[(flowers.species == "virginica")][["petal_length", "petal_width"]]
xyvalues = OrderedDict([("setosa", setosa.values), ("versicolor", versicolor.values), ("virginica", virginica.values)])
scatter1 = Scatter(xyvalues, title="iris dataset, dict_input", xlabel="petal_length",
ylabel="petal_width", legend='top_left', marker="triangle")
groupped_df = flowers[["petal_length", "petal_width", "species"]].groupby("species")
scatter2 = Scatter(groupped_df, title="iris dataset, dict_input", xlabel="petal_length",
ylabel="petal_width", legend='top_left')
pdict = OrderedDict()
for i in groupped_df.groups.keys():
labels = groupped_df.get_group(i).columns
xname = labels[0]
yname = labels[1]
x = getattr(groupped_df.get_group(i), xname)
y = getattr(groupped_df.get_group(i), yname)
pdict[i] = list(zip(x, y))
df = pd.DataFrame(pdict)
scatter3 = Scatter(
df, title="iris dataset, dict_input",
xlabel="petal_length", ylabel="petal_width", legend='top_left')
scatter4 = Scatter(
list(xyvalues.values()), title="iris dataset, dict_input",
xlabel="petal_length", ylabel="petal_width", legend='top_left')
output_file("scatter.html")
show(vplot(scatter1, scatter2, scatter3, scatter4))
| bsd-3-clause |
smjhnits/Praktikum_TU_D_16-17 | Anfängerpraktikum/Protokolle/V355_Gekoppelte_Schwingungen/LaTex-Dateien/Messungb_Plot1.py | 1 | 1554 | import numpy as np
from scipy.stats import sem
from uncertainties import ufloat
import uncertainties.unumpy as unp
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
L = 32.51 * 10 ** (-3)
C = 0.801 * 10 ** (-9)
Csp = 0.037 * 10 ** (-9)
R = 48
Nü_negativ = np.array([33.16, 33.66, 34.25, 35.12, 36.08, 37.60, 40.28, 47.33]) * 10 ** (3)
Nü_positiv = np.array([30.77, 30.79, 30.80, 30.81, 30.82, 30.83, 30.84, 30.85]) * 10 ** (3)
Kopplungskapazitäten = np.array([9.99, 8, 6.47, 5.02, 4.00, 3.00, 2.03, 1.01]) * 10 ** (-9)
C_K_Error = np.array([ufloat(n, 0.003*n) for n in Kopplungskapazitäten])
nu_m_theo = np.array([1 / ( 2 * np.pi * unp.sqrt( L * ( (1/C + 2/n)**(-1) + Csp) ) ) for n in C_K_Error])
nu_p_theo = 1 / ( 2 * np.pi * np.sqrt( L * ( C + Csp) ) )
nu_p_theo1 = np.array([nu_p_theo, nu_p_theo, nu_p_theo, nu_p_theo, nu_p_theo, nu_p_theo, nu_p_theo, nu_p_theo ])
nu_m_theo1 = np.array([unp.nominal_values(n) for n in nu_m_theo])
plt.plot(Kopplungskapazitäten, Nü_negativ*10**(-3), 'bx', label = r'Messung 3.2: $\nu_{-}$')
plt.plot(Kopplungskapazitäten, nu_m_theo1*10**(-3), 'rx', label = r'Theoriewerte: $\nu_{-}$')
plt.plot(Kopplungskapazitäten, Nü_positiv*10**(-3), 'mx', label = r'Messung 3.2: $\nu_{+}$')
plt.plot(Kopplungskapazitäten, nu_p_theo1*10**(-3), 'yx', label = r'Theoriewerte: $\nu_{+}$')
plt.xlabel(r'$Kopplungskapazität \,\, C_k \,\, in \,\, \mathrm{F}$')
plt.ylabel(r'$Frequenzen \,\, \nu \,\, in \,\, \mathrm{kHz}$')
plt.legend(loc = 'best')
#plt.show()
plt.savefig('Messungb_Plot1.pdf')
| mit |
daniaki/pyPPI | pyppi/data_mining/uniprot.py | 1 | 16331 | """
Purpose: Wrapper methods for accessing uniprot records using biopython. See
http://biopython.org/DIST/docs/api/Bio.SwissProt.Record-class.html for more
information about how biopython stores records.
"""
import time
import logging
import pandas as pd
from Bio import SwissProt
from Bio import ExPASy
from bioservices import UniProt as UniProtMapper
from urllib.error import HTTPError
from enum import Enum
from joblib import delayed, Parallel
from ..base.utilities import chunk_list
from ..base.io import uniprot_sprot, uniprot_trembl
from ..database.models import Protein
from ..database.validators import (
validate_go_annotations,
validate_boolean, validate_pfam_annotations,
validate_function, validate_interpro_annotations,
validate_keywords
)
UNIPROT_ORD_KEY = dict(P=0, Q=1, O=2)
logger = logging.getLogger("pyppi")
http_error_msg = "Unrecoverable HTTPError downloading record for {}."
ERRORS_TO_RETRY = ('503', '504', '408')
# --------------------------------------------------------------------------- #
#
# Biopython/SwissProt Download Utilities
#
# --------------------------------------------------------------------------- #
def download_record(accession, verbose=False, wait=5,
retries=3, taxon_id=9606):
"""Download a record for a UniProt accession via the UniProt API.
The call will retry upon timeout up to the number specified by `retries`.
Parameters
----------
accession : str
UniProt accession.
verbose : bool, optional
If True, log informational and warning messages to the console.
wait : int, optional
Seconds to wait before retrying download.
retries : int, optional
Number of times to retry the download if the server returns
errors `503`, `504` or `408`.
taxon_id : int, optional
The taxonomy id to download the accession for. No record is returned
if the downloaded record does not match this id.
Returns
-------
:class:`Bio.SwissProt.Record`
A UniProt record instance.
"""
record = None
success = False
try:
handle = ExPASy.get_sprot_raw(accession)
record = SwissProt.read(handle)
success = True
except HTTPError as httperr:
if httperr.code in ERRORS_TO_RETRY:
if verbose:
logger.exception(http_error_msg.format(accession))
logger.info("Re-attempting to download.")
for i in range(retries):
logger.info("Attempt %s/%s." % (i + 1, retries))
time.sleep(wait)
try:
handle = ExPASy.get_sprot_raw(accession)
record = SwissProt.read(handle)
success = True
except HTTPError:
pass
else:
if verbose:
logger.exception(http_error_msg.format(accession))
except ValueError:
if verbose:
logger.exception("No record found for '{}'.".format(accession))
record = None
success = False
if not success:
if verbose:
logger.warning(
"Failed to download record for '{}'".format(accession)
)
record = None
if (taxon_id is not None) and (record is not None) and \
int(record.taxonomy_id[0]) != taxon_id:
if verbose:
logger.warning(
"Taxonomy IDs do not match for record {}. "
"Expected '{}' but found '{}'.".format(
accession, taxon_id, int(record.taxonomy_id[0])
)
)
record = None
return record
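# Usage sketch (added; the accession below is only an illustrative
# placeholder, not taken from the original module):
# >>> record = download_record('P12345', verbose=True, taxon_id=9606)
# >>> if record is not None:
# ...     print(record.accessions[0])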
def download_records(accessions, verbose=False, wait=5,
retries=3, taxon_id=9606):
"""Download records for each UniProt accession via the UniProt API.
The call will retry upon timeout up to the number specified by `retries`.
Parameters
----------
accession : str
UniProt accession.
verbose : bool, optional
If True, log informational and warning messages to the console.
wait : int, optional
Seconds to wait before retrying download.
retries : int, optional
Number of times to retry the download if the server returns
errors `503`, `504` or `408`.
taxon_id : int, optional
The taxonomy id to download the accession for. No record is returned
if the downloaded record does not match this id.
Returns
-------
`list`
A list of :class:`Bio.SwissProt.Record` record instances.
"""
return [
download_record(a, verbose, wait, retries, taxon_id)
for a in accessions
]
def parallel_download(accessions, backend="multiprocessing",
verbose=False, n_jobs=1, wait=5,
retries=3, taxon_id=9606):
"""Parallel download records for UniProt accessions via the UniProt API.
The call will retry upon timeout up to the number specified by `retries`.
Parameters
----------
accession : str
UniProt accession.
verbose : bool, optional
If True, log informational and warning messages to the console.
wait : int, optional
Seconds to wait before retrying download.
retries : int, optional
Number of times to retry the download if the server returns
errors `503`, `504` or `408`.
taxon_id : int, optional
The taxonomy id to download the accession for. No record is returned
if the downloaded record does not match this id.
backend : str
A supported `Joblib` backend. Can be either 'multiprocessing' or
'threading'.
Returns
-------
`list`
A list of :class:`Bio.SwissProt.Record` record instances.
"""
# Warning: Setting backend to multiprocessing may cause strange errors.
# This is most likely due to this function not being run in a
# protected main loop.
accession_chunks = chunk_list(accessions, n=n_jobs)
records = Parallel(backend=backend, verbose=verbose, n_jobs=n_jobs)(
delayed(download_records)(chunk, verbose, wait, retries, taxon_id)
for chunk in list(accession_chunks)
)
return [r for sublist in records for r in sublist]
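# Usage sketch for the warning above (added; accessions are placeholders):
# with the 'multiprocessing' backend the call should sit inside a protected
# main loop, e.g.
# >>> if __name__ == '__main__':
# ...     records = parallel_download(['P12345', 'Q67890'], n_jobs=2,
# ...                                 backend='multiprocessing', taxon_id=9606)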
def serialise_record(record):
"""
Serialises the fields in a record that can then used to instantiate
a :class:`Protein` instance.
Parameters
----------
record : :class:`Bio.SwissProt.Record`
Record to serialise
Returns
-------
`dict`
Serialises the fields in a record that can then used to instantiate
a :class:`Protein` instance.
"""
if record is None:
return None
else:
uniprot_id = recent_accession(record)
taxon_id = taxonid(record)
gene_id = gene_name(record)
go_mf = go_terms(record, ont="mf")
go_bp = go_terms(record, ont="bp")
go_cc = go_terms(record, ont="cc")
interpro = interpro_terms(record)
pfam = pfam_terms(record)
reviewed = True if review_status(record) == 'Reviewed' else False
keywords_ = keywords(record)
function_ = function(record)
last_update_ = last_update(record)
last_release_ = last_release(record)
data = dict(
uniprot_id=uniprot_id, taxon_id=taxon_id, reviewed=reviewed,
gene_id=gene_id, go_mf=go_mf, go_bp=go_bp, go_cc=go_cc,
interpro=interpro, pfam=pfam, keywords=keywords_,
function=function_, last_update=last_update_,
last_release=last_release_
)
return data
def parse_record_into_protein(record, verbose=False):
"""
Instantiate a :class:`Protein` instance from a
:class:`Bio.SwissProt.Record` instance. It will not be saved to the
global session or database.
Parameters
----------
record : :class:`Bio.SwissProt.Record`
Record to turn into a :class:`Protein` instance.
verbose : bool, optional
If True, log informational and warning messages to the console.
Returns
-------
:class:`Protein`
Instantiated protein instance that has not been saved.
"""
if record is None:
return None
try:
constructor_args = serialise_record(record)
entry = Protein(**constructor_args)
return entry
except:
if verbose:
logger.exception("An error occured when trying to parse record.")
raise
# --------------------------------------------------------------------------- #
#
# Biopython/SwissProt Record Parsing
#
# --------------------------------------------------------------------------- #
def batch_map(accessions, fr='ACC+ID', allow_download=False, cache=False,
session=None, keep_unreviewed=True, match_taxon_id=9606,
verbose=False):
"""
Map a list of accessions using the UniProt batch mapping service.
Parameters
----------
accessions : list
List of accessions.
fr : str, optional
Database to map from. See :class:`bioservices.UniProt`.
keep_unreviewed : bool, optional
If True, keep the unreviewed accession in mapping.
allow_download : bool, optional
If True, will download records that are missing for any accession
in `accessions`.
cache : bool, optional
If True, `bioservices` cache will be used by
:class:`bioservices.UniProt`. Set to `False` to use the most up-to-date
mappings.
session : `scoped_session`, optional
Session instance to save protein instances to if `allow_download`
is True.
match_taxon_id : int, optional
Ignores mappings to or from proteins that do not match this id.
verbose : bool, optional
Log info/warning/error messages to the console.
Returns
-------
`dict`
A dictionary of mappings from UniProt accessions to the most
up-to-date UniProt accessions. Dictionary values are lists.
"""
uniprot_mapper = UniProtMapper(cache=cache)
filtered_mapping = {}
mapping = uniprot_mapper.mapping(fr=fr, to='ACC', query=accessions)
# No data was downloaded, try again a few times.
if mapping == {}:
for i in range(0, 4):
mapping = uniprot_mapper.mapping(
fr=fr, to='ACC', query=accessions
)
if mapping:
break
else:
if verbose:
logger.warning(
"Could not download map from uniprot server. "
"Attempt {}/5. Re-attempt in 3 seconds.".format(i + 2)
)
time.sleep(3)
if mapping == {}:
raise ValueError("Could not download map from uniprot server.")
for fr, to in mapping.items():
# Make sure any new accessions are in the database
invalid_to = []
for accession in to:
# Check to see if a protein macthing accession and the
# taxon id exists.
entry = Protein.get_by_uniprot_id(accession)
if entry is not None:
if (match_taxon_id is not None) and entry.taxon_id != match_taxon_id:
invalid_to.append(accession)
else:
if allow_download:
if verbose:
logger.info(
"Mapping to {}, but entry not found in database. "
"Attempting download.".format(accession)
)
record = download_record(
accession, verbose=True, taxon_id=match_taxon_id
)
protein = parse_record_into_protein(record)
if protein is not None:
protein.save(session, commit=True)
else:
if verbose:
logger.info(
"No valid record for {} was found".format(
accession)
)
invalid_to.append(accession)
else:
invalid_to.append(accession)
to = [a for a in to if a not in invalid_to]
status = [Protein.get_by_uniprot_id(a).reviewed for a in to]
reviewed = [a for (a, s) in zip(to, status) if s is True]
unreviewed = [a for (a, s) in zip(to, status) if s is False]
targets = reviewed
if keep_unreviewed:
targets += unreviewed
targets = list(set(targets))
if match_taxon_id is not None:
taxon_ids = [
Protein.get_by_uniprot_id(a).taxon_id for a in targets
]
targets = [
t for (t, taxon_id) in zip(targets, taxon_ids)
if match_taxon_id == taxon_id
]
filtered_mapping[fr] = list(sorted(targets))
return filtered_mapping
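# Usage sketch (added; the accession is a placeholder): map possibly outdated
# accessions to current UniProt accessions, keeping unreviewed matches and
# only entries whose taxonomy is human. The result maps each input accession
# to a list of up-to-date accessions.
# >>> mapping = batch_map(['P12345'], keep_unreviewed=True,
# ...                     allow_download=False, match_taxon_id=9606)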
def __xrefs(db_name, record):
result = []
for xref in record.cross_references:
extdb = xref[0]
if extdb == db_name:
result.append(xref[1:])
return result
def recent_accession(record):
if not record:
return None
return record.accessions[0]
def taxonid(record):
if not record:
return None
data = record.taxonomy_id[0]
return int(data)
def review_status(record):
if not record:
return None
return record.data_class
def gene_name(record):
if not record:
return None
try:
data = record.gene_name.split(';')[0].split('=')[-1].split(' ')[0]
except (KeyError, AssertionError, Exception):
data = None
if not data:
return None
return data
def go_terms(record, ont):
if not record:
return None
data = __xrefs("GO", record)
ids = list(map(lambda x: x[0], data))
names = list(map(lambda x: x[1], data))
if ont == 'mf':
ids = [i for (i, n) in zip(ids, names) if n[0] == 'F']
elif ont == 'bp':
ids = [i for (i, n) in zip(ids, names) if n[0] == 'P']
elif ont == 'cc':
ids = [i for (i, n) in zip(ids, names) if n[0] == 'C']
else:
pass
return ids
def pfam_terms(record):
if not record:
return None
data = __xrefs("Pfam", record)
return list(map(lambda x: x[0], data))
def interpro_terms(record):
if not record:
return None
data = __xrefs("InterPro", record)
return list(map(lambda x: x[0], data))
def keywords(record):
if not record:
return None
data = record.keywords
return data
def organism_code(record):
if not record:
return None
data = record.entry_name
data = data.split('_')[1]
return data
def entry_name(record):
if not record:
return None
return record.entry_name
def last_release(record):
if not record:
return None
return int(record.annotation_update[1])
def last_update(record):
if not record:
return None
return record.annotation_update[0]
def synonyms(record):
if not record:
return None
try:
# parse the "Synonyms=..." part of the gene_name line into a list
data = record.gene_name.split(';')[1].split('=')[1].split(', ')
except (KeyError, AssertionError, Exception):
data = None
return data
def function(r):
if r is None:
return None
elif not r.comments:
return None
else:
function = [x for x in r.comments if 'FUNCTION:' in x]
if not function:
return None
else:
return function[0].replace("FUNCTION: ", '')
| mit |
HesselTjeerdsma/Cyber-Physical-Pacman-Game | Algor/flask/lib/python2.7/site-packages/scipy/misc/common.py | 17 | 6104 | """
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import arange, newaxis, hstack, product, array, fromstring
__all__ = ['central_diff_weights', 'derivative', 'lena', 'ascent', 'face']
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative.
Assumes equally-spaced function points.
If weights are in the vector w, then
derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+ho*dx)
Parameters
----------
Np : int
Number of points for the central derivative.
ndiv : int, optional
Number of divisions. Default is 1.
Notes
-----
Can be inaccurate for large number of points.
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho,ho+1.0)
x = x[:,newaxis]
X = x**0.0
for k in range(1,Np):
X = hstack([X,x**k])
w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
return w
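# Added examples (approximate values, consistent with the pre-computed
# coefficients used by `derivative` below):
# >>> central_diff_weights(3)   # 3-point first derivative
# approximately [-0.5, 0.0, 0.5]
# >>> central_diff_weights(5)   # equals array([1, -8, 0, 8, -1]) / 12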
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the n-th derivative of a function at a point.
Given a function, use a central difference formula with spacing `dx` to
compute the `n`-th derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which `n`-th derivative is found.
dx : float, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
Arguments
order : int, optional
Number of points to use, must be odd.
Notes
-----
Making the step size too small can result in round-off error.
Examples
--------
>>> from scipy.misc import derivative
>>> def f(x):
... return x**3 + x**2
>>> derivative(f, 1.0, dx=1e-6)
4.9999999999217337
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n == 1:
if order == 3:
weights = array([-1,0,1])/2.0
elif order == 5:
weights = array([1,-8,0,8,-1])/12.0
elif order == 7:
weights = array([-1,9,-45,0,45,-9,1])/60.0
elif order == 9:
weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0
else:
weights = central_diff_weights(order,1)
elif n == 2:
if order == 3:
weights = array([1,-2.0,1])
elif order == 5:
weights = array([-1,16,-30,16,-1])/12.0
elif order == 7:
weights = array([2,-27,270,-490,270,-27,2])/180.0
elif order == 9:
weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0
else:
weights = central_diff_weights(order,2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k]*func(x0+(k-ho)*dx,*args)
return val / product((dx,)*n,axis=0)
def lena():
"""
Function that previously returned an example image
.. note:: Removed in 0.17
Parameters
----------
None
Returns
-------
None
Raises
------
RuntimeError
This functionality has been removed due to licensing reasons.
Notes
-----
The image previously returned by this function has an incompatible license
and has been removed from SciPy. Please use `face` or `ascent` instead.
See Also
--------
face, ascent
"""
raise RuntimeError('lena() is no longer included in SciPy, please use '
'ascent() or face() instead')
def ascent():
"""
Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos
The image is derived from accent-to-the-top.jpg at
http://www.public-domain-image.com/people-public-domain-images-pictures/
Parameters
----------
None
Returns
-------
ascent : ndarray
convenient image to use for testing and demonstration
Examples
--------
>>> import scipy.misc
>>> ascent = scipy.misc.ascent()
>>> ascent.shape
(512, 512)
>>> ascent.max()
255
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(ascent)
>>> plt.show()
"""
import pickle
import os
fname = os.path.join(os.path.dirname(__file__),'ascent.dat')
with open(fname, 'rb') as f:
ascent = array(pickle.load(f))
return ascent
def face(gray=False):
"""
Get a 1024 x 768, color image of a raccoon face.
raccoon-procyon-lotor.jpg at http://www.public-domain-image.com
Parameters
----------
gray : bool, optional
If True return 8-bit grey-scale image, otherwise return a color image
Returns
-------
face : ndarray
image of a racoon face
Examples
--------
>>> import scipy.misc
>>> face = scipy.misc.face()
>>> face.shape
(768, 1024, 3)
>>> face.max()
255
>>> face.dtype
dtype('uint8')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(face)
>>> plt.show()
"""
import bz2
import os
with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:
rawdata = f.read()
data = bz2.decompress(rawdata)
face = fromstring(data, dtype='uint8')
face.shape = (768, 1024, 3)
if gray is True:
face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8')
return face
| apache-2.0 |
aiguofer/bokeh | bokeh/charts/builders/line_builder.py | 4 | 9608 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Line class, which lets you build your Line charts by just
passing the arguments to the Chart class and calling the proper functions.
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import absolute_import
from six import iteritems
from itertools import chain
from ..builder import XYBuilder, create_and_build
from ..glyphs import LineGlyph, PointGlyph
from ..attributes import DashAttr, ColorAttr, MarkerAttr
from ..data_source import NumericalColumnsAssigner
from ...models.sources import ColumnDataSource
from ...core.properties import Bool, String, List
from ..operations import Stack, Dodge
from ..utils import add_tooltips_columns
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
def Line(data=None, x=None, y=None, **kws):
""" Create a line chart using :class:`LineBuilder <bokeh.charts.builders.line_builder.LineBuilder>` to
render the glyphs.
The line chart is typically used with column-oriented data, where each column
contains comparable measurements and the column names are treated as a categorical
variable for differentiating the measurement values. One of the columns can be used as
an index for either the x or y axis.
.. note::
Only the x or y axis can display multiple variables, while the other is used
as an index.
Args:
data (list(list), numpy.ndarray, pandas.DataFrame, list(pd.Series)): a 2d data
source with columns of data for each line.
x (str or list(str), optional): specifies variable(s) to use for x axis
y (str or list(str), optional): specifies variable(s) to use for y axis
In addition to the parameters specific to this chart,
:ref:`userguide_charts_defaults` are also accepted as keyword parameters.
.. note::
This chart type differs on input types as compared to other charts,
due to the way that line charts typically are plotting labeled series. For
example, a column for AAPL stock prices over time. Another way this could be
plotted is to have a DataFrame with a column of `stock_label` and columns of
`price`, which is the stacked format. Both should be supported, but the former
is the expected one. Internally, the latter format is being derived.
Returns:
:class:`Chart`: includes glyph renderers that generate the lines
Examples:
.. bokeh-plot::
:source-position: above
import numpy as np
from bokeh.charts import Line, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = np.array([[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]])
line = Line(xyvalues, title="line", legend="top_left", ylabel='Languages')
output_file('line.html')
show(line)
"""
kws['x'] = x
kws['y'] = y
return create_and_build(LineBuilder, data, **kws)
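# Data-format sketch for the note above (added; the tickers are placeholders,
# the column names follow the note): "wide" input has one labelled column per
# series, while the equivalent "stacked" input keeps a label column plus a
# value column, which LineBuilder derives internally.
# >>> import pandas as pd
# >>> wide = pd.DataFrame({'AAPL': [1, 2, 3], 'MSFT': [2, 3, 4]})
# >>> stacked = pd.DataFrame({'stock_label': ['AAPL'] * 3 + ['MSFT'] * 3,
# ...                         'price': [1, 2, 3, 2, 3, 4]})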
class LineBuilder(XYBuilder):
"""This is the Line class and it is in charge of plotting
Line charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
series_names = List(String, help="""Names that represent the items being plotted.""")
stack = Bool(default=False)
default_attributes = {'color': ColorAttr(),
'dash': DashAttr(),
'marker': MarkerAttr()}
dimensions = ['y', 'x']
column_selector = NumericalColumnsAssigner
glyph = LineGlyph
@property
def measures(self):
if isinstance(self.y.selection, list):
return self.y.selection
elif isinstance(self.x.selection, list):
return self.x.selection
else:
return None
@property
def measure_input(self):
return isinstance(self.y.selection, list) or isinstance(self.x.selection, list)
@property
def stack_flags(self):
# Check if we stack measurements and by which attributes
# This happens if we used the same series labels for dimensions as attributes
return {k: self.attr_measurement(k) for k in list(
self.attributes.keys())}
def get_id_cols(self, stack_flags):
# collect the other columns used as identifiers, that aren't a measurement name
id_cols = [self.attributes[attr].columns
for attr, stack in iteritems(stack_flags) if not stack and
self.attributes[attr].columns != self.measures and
self.attributes[attr].columns is not None]
return list(chain.from_iterable(id_cols))
def setup(self):
"""Handle input options that require transforming data and/or user selections."""
# handle special case of inputs as measures
if self.measure_input:
stack_flags = self.stack_flags
id_cols = self.get_id_cols(stack_flags)
# if we have measures input, we need to stack by something, set default
if all(attr is False for attr in list(stack_flags.values())):
stack_flags['color'] = True
# stack the measurement dimension while keeping id columns
self._stack_measures(ids=id_cols)
# set the attributes to key off of the name of the stacked measurement
source = ColumnDataSource(self._data.df)
for attr_name, stack_flag in iteritems(stack_flags):
if stack_flags[attr_name]:
default_attr = self.attributes[attr_name]
default_attr.setup(columns='series', data=source)
# Handle when to use special column names
if self.x.selection is None and self.y.selection is not None:
self.x.selection = 'index'
elif self.x.selection is not None and self.y.selection is None:
self.y.selection = 'index'
def attr_measurement(self, attr_name):
"""Detect if the attribute has been given measurement columns."""
cols = self.attributes[attr_name].columns
return (cols is not None and (cols == self.y.selection or
cols == self.x.selection))
def set_series(self, col_name):
series = self._data.df[col_name].drop_duplicates().tolist()
series = [str(item) for item in series]
self.series_names = series
def _stack_measures(self, ids, var_name='series'):
"""Stack data and keep the ids columns.
Args:
ids (list(str)): the column names that describe the measures
"""
if isinstance(self.y.selection, list):
dim = 'y'
if self.x.selection is not None:
ids.append(self.x.selection)
else:
dim = 'x'
if self.y.selection is not None:
ids.append(self.y.selection)
if len(ids) == 0:
ids = None
dim_prop = getattr(self, dim)
# transform our data by stacking the measurements into one column
self._data.stack_measures(measures=dim_prop.selection, ids=ids,
var_name=var_name)
# update our dimension with the updated data
dim_prop.set_data(self._data)
self.set_series('series')
def get_builder_attr(self):
attrs = self.properties()
return {attr: getattr(self, attr) for attr in attrs
if attr in self.glyph.properties()}
def yield_renderers(self):
build_attr = self.get_builder_attr()
# get the list of builder attributes and only pass them on if glyph supports
attrs = list(self.attributes.keys())
attrs = [attr for attr in attrs if attr in self.glyph.properties()]
for group in self._data.groupby(**self.attributes):
group_kwargs = self.get_group_kwargs(group, attrs)
group_kwargs.update(build_attr)
glyph = self.glyph(label=group.label,
x=group.get_values(self.x.selection),
y=group.get_values(self.y.selection),
**group_kwargs)
# dash=group['dash']
# save reference to composite glyph
self.add_glyph(group, glyph)
# yield each renderer produced by composite glyph
for renderer in glyph.renderers:
if self.tooltips:
renderer = add_tooltips_columns(renderer, self.tooltips, group)
yield renderer
if self.stack:
Stack().apply(self.comp_glyphs)
Dodge().apply(self.comp_glyphs)
class PointSeriesBuilder(LineBuilder):
glyph = PointGlyph
| bsd-3-clause |
NunoEdgarGub1/scikit-learn | sklearn/utils/tests/test_multiclass.py | 72 | 15350 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from functools import partial
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_label_indicator_matrix
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import is_sequence_of_sequences
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
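# Added note: np.asarray() falls back to __array__, so NotAnArray(np.arange(3))
# is treated like the array [0, 1, 2] by the type-checking utilities exercised
# below.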
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formated as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multilabel-sequences': [
[[0, 1]],
[[0], [1]],
[[1, 2, 3]],
[[1, 2, 1]], # duplicate values, why not?
[[1], [2], [0, 1]],
[[1], [2]],
[[]],
[()],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object')),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
# not currently supported sequence of sequences
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabels
assert_array_equal(assert_warns(DeprecationWarning,
unique_labels,
[(0, 1, 2), (0,), tuple(), (2, 1)]),
np.arange(3))
assert_array_equal(assert_warns(DeprecationWarning,
unique_labels,
[[0, 1, 2], [0], list(), [2, 1]]),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
# Some tests with strings input
assert_array_equal(unique_labels(["a", "b", "c"], ["d"]),
["a", "b", "c", "d"])
assert_array_equal(assert_warns(DeprecationWarning, unique_labels,
[["a", "b"], ["c"]], [["d"]]),
["a", "b", "c", "d"])
@ignore_warnings
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-sequences",
"multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
@ignore_warnings
def test_unique_labels_mixed_types():
# Mix of multilabel-indicator and multilabel-sequences
mix_multilabel_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multilabel-sequences"])
for y_multilabel, y_multiclass in mix_multilabel_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"] +
EXAMPLES["multilabel-sequences"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
# Mix string and number input type
assert_raises(ValueError, unique_labels, [[1, 2], [3]],
[["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [3]])
assert_array_equal(unique_labels([(2,), (0, 2,)], [(), ()]), [0, 2])
assert_array_equal(unique_labels([("2",), ("0", "2",)], [(), ()]),
["0", "2"])
@ignore_warnings
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group.startswith('multilabel'):
assert_, exp = assert_true, 'True'
else:
assert_, exp = assert_false, 'False'
for example in group_examples:
assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s' % (example, exp))
def test_is_label_indicator_matrix():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_label_indicator_matrix(exmpl_sparse),
msg=('is_label_indicator_matrix(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_label_indicator_matrix(example),
msg='is_label_indicator_matrix(%r) should be %s'
% (example, dense_exp))
def test_is_sequence_of_sequences():
for group, group_examples in iteritems(EXAMPLES):
if group == 'multilabel-sequences':
assert_, exp = assert_true, 'True'
check = partial(assert_warns, DeprecationWarning,
is_sequence_of_sequences)
else:
assert_, exp = assert_false, 'False'
check = is_sequence_of_sequences
for example in group_examples:
assert_(check(example),
msg='is_sequence_of_sequences(%r) should be %s'
% (example, exp))
@ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg='type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example)))
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
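    # In CSC format, column k is stored as data[indptr[k]:indptr[k + 1]] at
    # the row positions indices[indptr[k]:indptr[k + 1]]; here column 1 has
    # an explicit zero at row 0 and column 2 holds only implicit zeros.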
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
     class_prior_sp) = class_distribution(y_sp, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
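    # e.g. for column 0: class 1 occurs at rows 0, 2, 5 (weight 1 + 1 + 2 = 4),
    # class 2 at rows 1, 4 (2 + 1 = 3) and class 4 at row 3 (2); total 9,
    # hence the expected priors [4/9, 3/9, 2/9].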
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
simonsfoundation/CaImAn | use_cases/eLife_scripts/train_cnns/train_net_cifar.py | 2 | 7940 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 24 12:30:19 2017
@author: agiovann
"""
'''Adapted from the Keras convnet example for the MNIST dataset; here the
network is trained to classify curated CaImAn ground-truth components.
(The original example reaches 99.25% test accuracy after 12 epochs, about
16 seconds per epoch on a GRID K520 GPU.)
'''
#%%
from __future__ import division
from __future__ import print_function
from builtins import zip
from builtins import str
from builtins import map
from builtins import range
from past.utils import old_div
import cv2
import glob
try:
cv2.setNumThreads(1)
except:
print('Open CV is naturally single threaded')
try:
if __IPYTHON__:
print(1)
# this is used for debugging purposes only. allows to reload classes
# when changed
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
except NameError:
print('Not launched under iPython')
import caiman as cm
import numpy as np
import datetime
import os
import time
import pylab as pl
import psutil
import sys
from ipyparallel import Client
from skimage.external.tifffile import TiffFile
import scipy
import copy
from caiman.utils.utils import download_demo
from caiman.base.rois import extract_binary_masks_blob
from caiman.utils.visualization import plot_contours, view_patches_bar
from caiman.source_extraction.cnmf import cnmf as cnmf
from caiman.motion_correction import MotionCorrect
from caiman.components_evaluation import estimate_components_quality
from caiman.components_evaluation import evaluate_components
from caiman.tests.comparison import comparison
from caiman.motion_correction import tile_and_correct, motion_correction_piecewise
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from sklearn.model_selection import train_test_split
from caiman.utils.image_preprocessing_keras import ImageDataGenerator
import json as simplejson
from keras.models import model_from_json
from sklearn.utils import class_weight as cw
#%%
# the data, shuffled and split between train and test sets
with np.load('use_cases/CaImAnpaper/ground_truth_comoponents_curated.npz') as ld:
all_masks_gt = ld['all_masks_gt']
labels_gt = ld['labels_gt_cur']
#%%
batch_size = 128
num_classes = 2
epochs = 5000
test_fraction = 0.25
augmentation = True
# input image dimensions
img_rows, img_cols = 50, 50
x_train, x_test, y_train, y_test = train_test_split(
all_masks_gt, labels_gt, test_size=test_fraction)
class_weight = cw.compute_class_weight('balanced', np.unique(y_train), y_train)
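# 'balanced' weights each class by n_samples / (n_classes * np.bincount(y)),
# so the rarer class is given proportionally more weight during training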
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
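# 'channels_first' stores images as (channels, rows, cols) (the old Theano
# layout) while 'channels_last' uses (rows, cols, channels) (the TensorFlow
# layout); the reshape above follows whichever convention is active.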
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
#x_train /= 255
#x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
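# to_categorical one-hot encodes the integer labels, e.g. with num_classes=2
# the label 1 becomes [0., 1.]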
#%%
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# initiate RMSprop optimizer
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=opt,
metrics=['accuracy'])
if augmentation:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
# featurewise_center=True,
# featurewise_std_normalization=True,
shear_range=0.3,
rotation_range=360,
width_shift_range=0.2,
height_shift_range=0.2,
zoom_range=[0.8, 1.2],
horizontal_flip=True,
vertical_flip=True,
random_mult_range=[.25, 2]
)
# Compute quantities required for feature-wise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train,
batch_size=batch_size),
steps_per_epoch=x_train.shape[0] // batch_size,
epochs=epochs,
verbose=1,
class_weight=class_weight,
validation_data=(x_test, y_test))
else:
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
#%%
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
#%% Save model and weights
save_dir = 'use_cases/CaImAnpaper/'
model_name = str(datetime.datetime.now()).replace(' ', '-').replace(':', '-')
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_json = model.to_json()
json_path = os.path.join(save_dir, model_name + '.json')
with open(json_path, "w") as json_file:
    json_file.write(simplejson.dumps(simplejson.loads(model_json), indent=4))
print('Saved trained model at %s ' % json_path)
model_path = os.path.join(save_dir, model_name + '.h5')
model.save(model_path)
print('Saved trained model at %s ' % model_path)
#%% visualize_results
predictions = model.predict(all_masks_gt, batch_size=32, verbose=1)
cm.movie(np.squeeze(all_masks_gt[np.where(predictions[:, 0] >= 0.5)[0]])).play(
gain=3., magnification=5, fr=10)
#%%
cm.movie(np.squeeze(all_masks_gt[np.where(predictions[:, 1] >= 0.5)[0]])).play(
gain=3., magnification=5, fr=10)
#%% retrieve and test
json_file = open(json_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights(model_path)
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
loaded_model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=opt,
metrics=['accuracy'])
print("Loaded model from disk")
score = loaded_model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
score = loaded_model.evaluate(x_train, y_train, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
#%%
from skimage.util.montage import montage2d
predictions = loaded_model.predict(all_masks_gt, batch_size=32, verbose=1)
cm.movie(np.squeeze(all_masks_gt[np.where(predictions[:, 1] < 0.1)[0]])).play(
gain=3., magnification=5, fr=10)
#%%
pl.imshow(montage2d(all_masks_gt[np.where((labels_gt == 0) & (
    predictions[:, 1] >= 0.5))[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where((labels_gt == 1) & (
    predictions[:, 0] >= 0.5))[0]].squeeze()))
| gpl-2.0 |
yyjiang/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
    # 30 samples in total: n_clusters * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
    # Test that passing an AgglomerativeClustering instance as n_clusters
    # gives the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
    # Test that the radius of each leaf subcluster stays within the threshold
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/linear_model/tests/test_randomized_l1.py | 57 | 4736 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import _preprocess_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# univariate F-scores of the features, used below to check that the most
# informative features are recovered by the randomized procedures
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
# labels should not be centered
X, _, _, _, _ = _preprocess_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| mit |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/gaussian_process/gpr.py | 9 | 20273 | """Gaussian processes regression. """
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_X_y, check_array
from sklearn.utils.deprecation import deprecated
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
"""Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of Gaussian Processes
for Machine Learning (GPML) by Rasmussen and Williams.
In addition to standard scikit-learn estimator API,
GaussianProcessRegressor:
* allows prediction without prior fitting (based on the GP prior)
* provides an additional method sample_y(X), which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
* exposes a method log_marginal_likelihood(theta), which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
Read more in the :ref:`User Guide <gaussian_process>`.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
alpha : float or array-like, optional (default: 1e-10)
Value added to the diagonal of the kernel matrix during fitting.
Larger values correspond to increased noise level in the observations.
This can also prevent a potential numerical issue during fitting, by
ensuring that the calculated values form a positive definite matrix.
If an array is passed, it must have the same number of entries as the
data used for fitting and is used as datapoint-dependent noise level.
        Note that this is equivalent to adding a WhiteKernel with
        noise_level=alpha. Allowing the noise level to be specified directly
        as a parameter is mainly for convenience and for consistency with
        Ridge.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer == 0 implies that one
run is performed.
normalize_y : boolean, optional (default: False)
Whether the target values y are normalized, i.e., the mean of the
observed target values become zero. This parameter should be set to
        True if the target values' mean is expected to differ considerably
        from zero. When enabled, the normalization effectively modifies the
        GP's prior based on the data, which contradicts the likelihood
        principle; normalization is thus disabled per default.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, optional (default: None)
The generator used to initialize the centers. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_ : array-like, shape = (n_samples, [n_output_dims])
Target values in training data (also required for prediction)
kernel_ : kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
alpha_ : array-like, shape = (n_samples,)
Dual coefficients of training data points in kernel space
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, alpha=1e-10,
optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
normalize_y=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.alpha = alpha
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.normalize_y = normalize_y
self.copy_X_train = copy_X_train
self.random_state = random_state
@property
@deprecated("Attribute rng was deprecated in version 0.19 and "
"will be removed in 0.21.")
def rng(self):
return self._rng
@property
@deprecated("Attribute y_train_mean was deprecated in version 0.19 and "
"will be removed in 0.21.")
def y_train_mean(self):
return self._y_train_mean
def fit(self, X, y):
"""Fit Gaussian process regression model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples, [n_output_dims])
Target values
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self._rng = check_random_state(self.random_state)
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
# Normalize target value
if self.normalize_y:
self._y_train_mean = np.mean(y, axis=0)
# demean y
y = y - self._y_train_mean
else:
self._y_train_mean = np.zeros(1)
if np.iterable(self.alpha) \
and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError("alpha must be a scalar or an array"
                                 " with same number of entries as y. (%d != %d)"
% (self.alpha.shape[0], y.shape[0]))
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [(self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds))]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = \
self._rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
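        # the alpha term added to the diagonal acts as additive noise (jitter)
        # and keeps K positive definite for the Cholesky factorization below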
try:
self.L_ = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError as exc:
exc.args = ("The kernel, %s, is not returning a "
"positive definite matrix. Try gradually "
"increasing the 'alpha' parameter of your "
"GaussianProcessRegressor estimator."
% self.kernel_,) + exc.args
raise
self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3
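        # alpha_ solves K alpha = y_train through the Cholesky factor, so the
        # posterior mean reduces to K(X_query, X_train).dot(alpha_)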
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
        In addition to the mean of the predictive distribution, its standard
        deviation (return_std=True) or covariance (return_cov=True) can also
        be returned. Note that at most one of the two can be requested.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Query points where the GP is evaluated
return_std : bool, default: False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default: False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean
Returns
-------
y_mean : array, shape = (n_samples, [n_output_dims])
            Mean of predictive distribution at query points
y_std : array, shape = (n_samples,), optional
Standard deviation of predictive distribution at query points.
Only returned when return_std is True.
y_cov : array, shape = (n_samples, n_samples), optional
            Covariance of joint predictive distribution at query points.
Only returned when return_cov is True.
"""
if return_std and return_cov:
raise RuntimeError(
"Not returning standard deviation of predictions when "
"returning full covariance.")
X = check_array(X)
        if not hasattr(self, "X_train_"):  # Unfitted; predict from GP prior
if self.kernel is None:
kernel = (C(1.0, constant_value_bounds="fixed") *
RBF(1.0, length_scale_bounds="fixed"))
else:
kernel = self.kernel
y_mean = np.zeros(X.shape[0])
if return_cov:
y_cov = kernel(X)
return y_mean, y_cov
elif return_std:
y_var = kernel.diag(X)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star)
            y_mean = self._y_train_mean + y_mean  # undo normalization
if return_cov:
v = cho_solve((self.L_, True), K_trans.T) # Line 5
y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6
return y_mean, y_cov
elif return_std:
# compute inverse K_inv of K based on its Cholesky
# decomposition L and its inverse L_inv
L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
K_inv = L_inv.dot(L_inv.T)
# Compute variance of predictive distribution
y_var = self.kernel_.diag(X)
y_var -= np.einsum("ij,ij->i", np.dot(K_trans, K_inv), K_trans)
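                # the einsum above evaluates only the diagonal of
                # K_trans.dot(K_inv).dot(K_trans.T), i.e. one quadratic form
                # per query point, instead of the full matrix product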
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn("Predicted variances smaller than 0. "
"Setting those variances to 0.")
y_var[y_var_negative] = 0.0
return y_mean, np.sqrt(y_var)
else:
return y_mean
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like, shape = (n_samples_X, n_features)
Query points where the GP samples are evaluated
n_samples : int, default: 1
The number of samples drawn from the Gaussian process
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the
random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
Returns
-------
y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = \
[rng.multivariate_normal(y_mean[:, i], y_cov,
n_samples).T[:, np.newaxis]
for i in range(y_mean.shape[1])]
y_samples = np.hstack(y_samples)
return y_samples
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) \
if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train) # Line 3
# Compute log-likelihood (compare line 7)
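        # i.e. log p(y | X, theta) = -0.5 * y^T alpha - sum(log(diag(L)))
        #      - (n / 2) * log(2 * pi), evaluated per output dimension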
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions
if eval_gradient: # compare Equation 5.9 from GPML
tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension
tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
# Compute "0.5 * trace(tmp.dot(K_gradient))" without
# constructing the full matrix tmp.dot(K_gradient) since only
# its diagonal is required
log_likelihood_gradient_dims = \
0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
                warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
                              "state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
| mit |
walterreade/scikit-learn | sklearn/svm/classes.py | 34 | 40599 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
from ..utils.multiclass import check_classification_targets
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to ``coef_``
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
``"ovr"`` trains n_classes one-vs-rest classifiers, while ``"crammer_singer"``
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If ``"crammer_singer"`` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
``[x, self.intercept_scaling]``,
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to ``class_weight[i]*C`` for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
``coef_`` is a readonly property derived from ``raw_coef_`` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation, liblinear, uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
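        # in the binary crammer_singer case, collapse the two per-class score
        # functions into a single decision function (their difference)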
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive' (default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the
        epsilon-insensitive loss (standard SVR) while
        'squared_epsilon_insensitive' is the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set ``epsilon=0``.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
    coef_ : array, shape = [n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `raw_coef_` that
        follows the internal memory layout of liblinear.
    intercept_ : array, shape = [1]
        Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
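# --- Illustrative aside (editor's addition, not part of the original module) ---
# A minimal sketch of how `decision_function_shape` (documented above) changes
# the shape of the returned decision values.  The toy data below is made up.
def _example_decision_function_shape():
    import numpy as np
    # Four well-separated classes, two samples each (8 samples in total).
    X = np.array([[-3., -3.], [-3., -2.],
                  [3., 3.], [3., 2.],
                  [-3., 3.], [-3., 2.],
                  [3., -3.], [3., -2.]])
    y = np.array([0, 0, 1, 1, 2, 2, 3, 3])
    ovo = NuSVC(decision_function_shape='ovo').fit(X, y).decision_function(X)
    ovr = NuSVC(decision_function_shape='ovr').fit(X, y).decision_function(X)
    # 'ovo' keeps one column per pair of classes: 4 * 3 / 2 = 6 columns.
    # 'ovr' aggregates to one column per class: 4 columns.
    return ovo.shape, ovr.shape  # expected: (8, 6) and (8, 4)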
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
    support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
    support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
        Epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
    support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`.
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
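# --- Illustrative aside (editor's addition, not part of the original module) ---
# The OneClassSVM docstring above has no Examples section, so here is a minimal
# usage sketch on made-up data: fit on "normal" points, then score new points.
# predict() returns +1 for inliers and -1 for outliers.
def _example_one_class_svm():
    import numpy as np
    rng = np.random.RandomState(0)
    X_train = 0.3 * rng.randn(100, 2)               # points clustered near the origin
    X_new = np.array([[0.1, -0.1], [4.0, 4.0]])     # one inlier, one obvious outlier
    clf = OneClassSVM(nu=0.1, kernel='rbf', gamma=0.1)
    clf.fit(X_train)
    labels = clf.predict(X_new)                     # e.g. array([ 1, -1])
    scores = clf.decision_function(X_new)           # positive inside, negative outside
    return labels, scores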
| bsd-3-clause |
linkmax91/bitquant | web/home/ipython/examples/python-web.py | 1 | 2343 |
# coding: utf-8
# This is an example of registering a Python web service. You can access a service
# registered under the name /python-web at http://localhost/app/python-web
# In[ ]:
#This creates a web service located off
#
#
# To shut down the server you will need to
# restart the kernel
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self, *param):
self.write("Hello, world param=%s" % param[0])
import bitquantutils
bitquantutils.register_tornado_handler("/python-web", MainHandler)
# In[ ]:
#Create web service off of
# http://localhost/app/python-wsgi
def simple_app(environ, start_response):
status = "200 OK"
response_headers = [("Content-type", "text/plain")]
start_response(status, response_headers)
return [b"Hello world! with WSGI Handler\n"]
import bitquantutils
bitquantutils.register_wsgi("/python-wsgi", simple_app)
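# Illustrative aside (editor's addition, not part of the original notebook):
# `simple_app` above is a plain WSGI callable, so it can also be served with
# only the standard library.  The port number below is an arbitrary assumption.
def serve_simple_app_locally(port=8051):
    from wsgiref.simple_server import make_server
    httpd = make_server('localhost', port, simple_app)
    # httpd.serve_forever()  # uncomment to serve at http://localhost:8051/
    return httpd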
# In[ ]:
#Create web service off of
# http://localhost/app/python-web-html
from IPython.display import HTML
def html_response(input):
return """<table>
<tr>
<th>Header 1</th>
<th>Header 2</th>
</tr>
<tr>
<td>row 1, cell 1</td>
<td>row 1, cell 2</td>
</tr>
<tr>
<td>row 2, cell 1</td>
<td>row 2, cell 2</td>
</tr>
</table>"""
HTML(html_response(None))
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write(html_response(None))
import bitquantutils
bitquantutils.register_tornado_handler("/python-web-html", MainHandler)
# In[ ]:
#Create web service off of
# http://localhost/app/python-web-image
get_ipython().magic('matplotlib inline')
def image_response(input):
import matplotlib
import matplotlib.pyplot as plt
import io
from matplotlib import numpy as np
x = np.arange(0,np.pi*3,.1)
y = np.sin(x)
fig = plt.figure()
plt.plot(x,y)
imgdata = io.StringIO()
fig.savefig(imgdata, format='svg')
return imgdata.getvalue()
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write(image_response(None))
self.set_header("Content-type", "image/svg")
import bitquantutils
bitquantutils.register_tornado_handler("/python-web-image", MainHandler)
# In[ ]:
import bitquantutils
bitquantutils.start_loop()
# In[ ]:
bitquantutils.unregister_all()
# In[ ]:
| apache-2.0 |
Drob-AI/The-Observer | src/mod_suggest/tree_trainer.py | 1 | 4804 | import random
import numpy as np
from sklearn import tree
from sklearn import neighbors
from sklearn import svm
from sklearn import grid_search
from sklearn.metrics import accuracy_score
from sklearn.grid_search import GridSearchCV
from sklearn import cross_validation
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import mean_squared_error
def split_sets(dataset, test_set_len):
test_set = []
original_dataset_len = len(dataset)
dataset = list(dataset)
while(len(test_set) < original_dataset_len * test_set_len):
index = random.randint(0, len(dataset) - 1)
test_set.append(dataset[index])
del dataset[index]
return ( test_set, dataset)
def split_train_result_set(set, feature_index):
transponed = np.array(set).T
results_set = transponed[feature_index]
transponed = list(transponed)
del transponed[feature_index]
x_set = np.array(transponed).T
results_set = results_set.T
return(x_set, results_set)
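# Illustrative aside (editor's addition, not part of the original module):
# how the two helpers above are meant to be combined.  The toy rows are made
# up; the last column plays the role of the label (feature_index=2).
def _demo_split_helpers():
    toy = [[0.1, 1.2, 0], [0.4, 0.9, 1], [0.3, 1.1, 0], [0.8, 0.2, 1],
           [0.7, 0.4, 1], [0.2, 1.3, 0], [0.9, 0.1, 1], [0.5, 0.6, 0]]
    test_set, train_set = split_sets(toy, 0.25)        # hold out ~25% of the rows
    x_train, y_train = split_train_result_set(train_set, 2)
    x_test, y_test = split_train_result_set(test_set, 2)
    return x_train.shape, y_train.shape, x_test.shape, y_test.shape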
def train_classifier_tree(dataset, feature_index, stats):
# print(dataset, feature_index)
test_set, train_set = split_sets(dataset, 0.1)
x_train, y_train = split_train_result_set(train_set, feature_index)
x_train_test, y_train_test = split_train_result_set(test_set, feature_index)
# if( stats[feature_index]['type'] == 'string'):
clf = tree.DecisionTreeClassifier()
clf = clf.fit(x_train, y_train)
print(accuracy_score(clf.predict(x_train_test), y_train_test))
print(len(test_set), len(train_set))
# else:
# clf = tree.DecisionTreeClassifier()
# print(x_train, y_train)
# clf = clf.fit(x_train, y_train)
# print("MSE", mean_squared_error(clf.predict(x_train_test), y_train_test))
# print(len(test_set), len(train_set))
def train_classifier_tree2(dataset, feature_index, stats):
# print(dataset, feature_index)
test_set, train_set = split_sets(dataset, 0.1)
x_train, y_train = split_train_result_set(train_set, feature_index)
x_train_test, y_train_test = split_train_result_set(test_set, feature_index)
n_estimators=[100, 180]
min_samples_split=[2, 10]
clf = RandomForestClassifier()
# clf = tree.DecisionTreeClassifier()
# clf = clf.fit(x_train, y_train)
nFolds = 5
param_grid = dict(n_estimators=n_estimators, min_samples_split=min_samples_split)
# param_grid = dict(max_depth=[10, 11, 12, 15],)
cv = cross_validation.StratifiedKFold(y_train, nFolds)
grid = GridSearchCV(clf, param_grid=param_grid,cv=cv)
grid.fit(x_train, y_train)
print(accuracy_score(grid.predict(x_train_test), y_train_test))
print(len(test_set), len(train_set))
def train_knn(dataset, feature_index, stats):
test_set, train_set = split_sets(dataset, 0.1)
x_train, y_train = split_train_result_set(train_set, feature_index)
x_train_test, y_train_test = split_train_result_set(test_set, feature_index)
knn = neighbors.KNeighborsClassifier()
knn = knn.fit(x_train, y_train)
print(accuracy_score(knn.predict(x_train_test), y_train_test))
print(len(test_set), len(train_set))
def train_knn2(dataset, feature_index, stats):
test_set, train_set = split_sets(dataset, 0.1)
x_train, y_train = split_train_result_set(train_set, feature_index)
x_train_test, y_train_test = split_train_result_set(test_set, feature_index)
nFolds = 4
metrics = ['minkowski','euclidean','manhattan']
weights = ['uniform','distance']
numNeighbors = np.arange(5, 10)
param_grid = dict(metric=metrics,weights=weights,n_neighbors=numNeighbors)
cv = cross_validation.StratifiedKFold(y_train, nFolds)
grid = GridSearchCV(neighbors.KNeighborsClassifier(), param_grid=param_grid,cv=cv)
grid.fit(x_train, y_train)
print(accuracy_score(grid.predict(x_train_test), y_train_test))
def train_classifier_svm(dataset, feature_index, stats):
test_set, train_set = split_sets(dataset, 0.1)
x_train, y_train = split_train_result_set(train_set, feature_index)
x_train_test, y_train_test = split_train_result_set(test_set, feature_index)
clf = svm.SVC()
clf = clf.fit(x_train, y_train)
print(accuracy_score(clf.predict(x_train_test), y_train_test))
print(len(test_set), len(train_set))
def train_classifier_svm2(dataset, feature_index, stats):
test_set, train_set = split_sets(dataset, 0.1)
x_train, y_train = split_train_result_set(train_set, feature_index)
x_train_test, y_train_test = split_train_result_set(test_set, feature_index)
param_grid = [{'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']}]
grid = GridSearchCV(svm.SVC(), param_grid=param_grid)
grid.fit(x_train, y_train)
    print(accuracy_score(grid.predict(x_train_test), y_train_test))
 | mit |
jbedorf/tensorflow | tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py | 24 | 9587 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.metrics.python.ops import histogram_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class Strict1dCumsumTest(test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.cached_session():
tensor = constant_op.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = constant_op.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.cached_session():
tensor = constant_op.constant([3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = constant_op.constant([3], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.cached_session():
tensor = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = constant_op.constant([1, 3, 6], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.cached_session():
labels = constant_op.constant([], shape=[0], dtype=dtypes.bool)
scores = constant_op.constant([], shape=[0], dtype=dtypes.float32)
score_range = [0, 1.]
auc, update_op = histogram_ops.auc_using_histogram(labels, scores,
score_range)
variables.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(
nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(
nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(
nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(
nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(
nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually
be True.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
    score_range = score_range or [0, 1.]
with self.cached_session():
labels = array_ops.placeholder(dtypes.bool, shape=[num_records])
scores = array_ops.placeholder(dtypes.float32, shape=[num_records])
auc, update_op = histogram_ops.auc_using_histogram(
labels, scores, score_range, nbins=nbins)
variables.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually be
True.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
# also checked this against sklearn.metrics.roc_auc_curve.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
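# Illustrative aside (editor's addition, not part of the original test file):
# a NumPy-only sanity check for synthetic_data above.  The empirical AUC of
# (labels, scores) is the fraction of (true, false) score pairs in which the
# true score is larger; for large num_records it should approach desired_auc.
def _empirical_auc(labels, scores):
  true_scores = scores[labels]
  false_scores = scores[~labels]
  if len(true_scores) == 0 or len(false_scores) == 0:
    return np.nan
  # Pairwise comparisons; ties (probability zero here) would count as 0.5.
  return np.mean(true_scores[:, np.newaxis] > false_scores[np.newaxis, :])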
if __name__ == '__main__':
test.main()
| apache-2.0 |
wathen/PhD | MHD/FEniCS/MHD/CG/PicardIter_Direct/DecoupleTest/MHDnewcastle.py | 1 | 11326 | #!/usr/bin/python
# interpolate scalar gradient onto nedelec space
from dolfin import *
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
import matplotlib.pylab as plt
import PETScIO as IO
import common
import scipy
import scipy.io
import time as t
import BiLinear as forms
import DirectOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import ExactSol
# locals()
#Modules([forms,Iter,MO,CP,ExactSol,IO,common])
#ff
m = 5
Type = ['Full','MD','CD']
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
nn = 2
mm = 4
MUsave = np.zeros((mm*3,1))
MUit = np.zeros((m-1,mm*3))
print MUit[0,0]
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
MU[0]= 1e0
R = 010.0
jj = 0
for yy in xrange(1,mm+1):
MU =(R*10**(-yy))
print "++++++++",MU
for ii in xrange(0,3):
jj += 1
MUsave[jj-1] = MU
for xx in xrange(1,m):
IterType = Type[ii]
print xx
level[xx-1] = xx+2
nn = 2**(level[xx-1])
print "==================================",IterType
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
parameters["form_compiler"]["quadrature_degree"] = -1
mesh = UnitSquareMesh(nn,nn)
order = 2
parameters['reorder_dofs_serial'] = False
Velocity = VectorFunctionSpace(mesh, "CG", order)
Pressure = FunctionSpace(mesh, "CG", order-1)
Magnetic = FunctionSpace(mesh, "N1curl", order)
Lagrange = FunctionSpace(mesh, "CG", order)
W = MixedFunctionSpace([Velocity,Pressure,Magnetic,Lagrange])
# W = Velocity*Pressure*Magnetic*Lagrange
Velocitydim[xx-1] = Velocity.dim()
Pressuredim[xx-1] = Pressure.dim()
Magneticdim[xx-1] = Magnetic.dim()
Lagrangedim[xx-1] = Lagrange.dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(), Lagrange.dim()]
def boundary(x, on_boundary):
return on_boundary
u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D(4,1)
# plot(interpolate(u0,Velocity))
p0 = interpolate(p0,Pressure)
p0.vector()[:] -= np.max(p0.vector().array() )/2
# plot(interpolate(p0,Pressure))
bcu = DirichletBC(W.sub(0),u0, boundary)
bcb = DirichletBC(W.sub(2),b0, boundary)
bcr = DirichletBC(W.sub(3),r0, boundary)
# bc = [u0,p0,b0,r0]
bcs = [bcu,bcb,bcr]
FSpaces = [Velocity,Pressure,Magnetic,Lagrange]
(u, p, b, r) = TrialFunctions(W)
(v, q, c,s ) = TestFunctions(W)
kappa = 1.0
Mu_m =10.0
# MU = 1.0
print "================================",MU
F_NS = -MU*Laplacian + Advection + gradPres - kappa*NS_Couple
F_M = Mu_m*kappa*CurlCurl + gradR - kappa*M_Couple
params = [kappa,Mu_m,MU]
u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,Neumann=Expression(("0","0")),options ="New")
# plot(u_k)
VelPres = Velocitydim[xx-1]+Pressuredim[xx-1]
# t.sleep(10)
ones = Function(Pressure)
ones.vector()[:]=(0*ones.vector().array()+1)
pConst = -assemble(p_k*dx)/assemble(ones*dx)
p_k.vector()[:] += pConst
x = Iter.u_prev(u_k,p_k,b_k,r_k)
# plot(b_k)
ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, W, F_M, F_NS, u_k, b_k, params, IterType)
RHSform = forms.PicardRHS(mesh, W, u_k, p_k, b_k, r_k, params)
bcu = DirichletBC(W.sub(0),Expression(("0.0","0.0")), boundary)
bcb = DirichletBC(W.sub(2),Expression(("0.0","0.0")), boundary)
bcr = DirichletBC(W.sub(3),Expression(("0.0")), boundary)
bcs = [bcu,bcb,bcr]
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-5 # tolerance
iter = 0 # iteration counter
maxiter = 40 # max no of iterations allowed
SolutionTime = 0
outer = 0
parameters['linear_algebra_backend'] = 'uBLAS'
p = forms.Preconditioner(mesh,W,u_k, b_k,params,IterType)
# PP,Pb = assemble_system(p, Lns,bcs)
if IterType == "CD":
AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
A,b,u = Iter.RemoveRowCol(AA,bb,VelPres)
while eps > tol and iter < maxiter:
iter += 1
if IterType == "CD":
bb = assemble((Lmaxwell + Lns) - RHSform)
for bc in bcs:
bc.apply(bb)
A,b,u = Iter.RemoveRowCol(AA,bb,VelPres)
else:
AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
A,b,u = Iter.RemoveRowCol(AA,bb,VelPres)
ksp = PETSc.KSP().create()
pc = ksp.getPC()#.PC().create()
# P = MO.shift(A,0.000001)
ksp.setOperators(A )
del A
OptDB = PETSc.Options()
OptDB["ksp_type"] = "preonly"
OptDB["pc_type"] = "lu"
OptDB["pc_factor_mat_ordering_type"] = "rcm"
OptDB["pc_factor_mat_solver_package"] = "mumps"
# OptDB["pc_factor_shift_amount"] = 2
ksp.setFromOptions()
tic()
ksp.solve(b, u)
time = toc()
print time
SolutionTime = SolutionTime +time
del ksp, pc
u, p, b, r, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter)
p.vector()[:] += - assemble(p*dx)/assemble(ones*dx)
u_k.assign(u)
p_k.assign(p)
b_k.assign(b)
r_k.assign(r)
# plot(u_k)
# plot(p_k)
uOld= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
x = Iter.u_prev(u_k,p_k,b_k,r_k)
if eps > 1e10:
iter = 0
break
# u_k,b_k,epsu,epsb=Iter.PicardTolerance(x,u_k,b_k,FSpaces,dim,"inf",iter)
MUit[xx-1,jj-1]= iter
# SolTime[xx-1] = SolutionTime/iter
ue =u0
pe = p0
be = b0
re = r0
ExactSolution = [ue,pe,be,re]
# errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = Iter.Errors(x,mesh,FSpaces,ExactSolution,order,dim)
# if xx == 1:
# l2uorder[xx-1] = 0
# else:
# l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1]))
# H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1]))
# l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1]))
# l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1]))
# Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1]))
# l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1]))
# H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1]))
print MUit
import pandas as pd
LatexTitles = ["l","DoF"]
for x in xrange(1,mm+1):
LatexTitles.extend(["it","it","it"])
LatexValues = np.concatenate((level,Wdim,MUit), axis=1)
title = np.concatenate((np.array([[0,0]]),MUsave.T),axis=1)
MU = ["0","0"]
for x in xrange(1,mm+1):
MU.extend(["Full","MD","CD"])
LatexValues = np.vstack((title,LatexValues))
LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
print LatexTable.to_latex()
# LatexTitles = ["l","DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"]
# LatexValues = np.concatenate((level,Velocitydim,Pressuredim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1)
# LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
# pd.set_option('precision',3)
# LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f")
# LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f")
# LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f")
# print LatexTable.to_latex()
# print "\n\n Magnetic convergence"
# MagneticTitles = ["l","B DoF","R DoF","B-L2","L2-order","B-Curl","HCurl-order"]
# MagneticValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2b,l2border,errCurlb,Curlborder),axis=1)
# MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
# pd.set_option('precision',3)
# MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
# MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
# MagneticTable = MO.PandasFormat(MagneticTable,"L2-order","%1.2f")
# MagneticTable = MO.PandasFormat(MagneticTable,'HCurl-order',"%1.2f")
# print MagneticTable.to_latex()
# print "\n\n Lagrange convergence"
# LagrangeTitles = ["l","B DoF","R DoF","R-L2","L2-order","R-H1","H1-order"]
# LagrangeValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2r,l2rorder,errH1r,H1rorder),axis=1)
# LagrangeTable= pd.DataFrame(LagrangeValues, columns = LagrangeTitles)
# pd.set_option('precision',3)
# LagrangeTable = MO.PandasFormat(LagrangeTable,"R-L2","%2.4e")
# LagrangeTable = MO.PandasFormat(LagrangeTable,'R-H1',"%2.4e")
# LagrangeTable = MO.PandasFormat(LagrangeTable,"H1-order","%1.2f")
# LagrangeTable = MO.PandasFormat(LagrangeTable,'L2-order',"%1.2f")
# print LagrangeTable.to_latex()
# # # if (ShowResultPlots == 'yes'):
# plot(u_k)
# plot(b_k)
# plot(r_k)
# plot(p_k)
# # # plot(ba)
# plot(interpolate(p0,Pressure))
# # plot(ra)
# plot(interpolate(re,Lagrange))
# interactive()
interactive()
def ModuleCheck():
modulenames = set(sys.modules)&set(globals())
print modulenames
ModuleCheck()
| mit |
Srisai85/scikit-learn | sklearn/tests/test_cross_validation.py | 27 | 41664 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of train an test split cover all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
# Check that a all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
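# Illustrative aside (editor's addition, not part of the original tests):
# minimal intended usage of the two helpers above on a concrete KFold.
def _demo_check_cv_coverage():
    cv = cval.KFold(6, 3)
    # Indices 0..5 must each appear in exactly one of the 3 test folds.
    check_cv_coverage(cv, expected_n_iter=3, n_samples=6)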
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented at on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact be computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than than the non
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test the StratifiedShuffleSplit, indices are drawn with a
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X and
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def test_train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def test_train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
| bsd-3-clause |
bmazin/ARCONS-pipeline | mosaicing/matchFinder.py | 1 | 10154 | import numpy as np
import tables
import sys
import ephem
import PyGuide as pg
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from util.ObsFile import ObsFile
import os
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import *
import hotpix.hotPixels as hp
from tables import *
from util.FileName import FileName
from photometry.PSFphotometry import PSFphotometry
from util import utils
from util.popup import PopUp
import astrometry.CentroidCalc as cc
import util.ObsFileSeq as ofs
import time
#added by Neil vvvvvv 2/3/2015
class MouseMonitor():
def __init__(self):
pass
def on_click(self,event):
if event.inaxes is self.ax1:
self.xyguess1 = [event.xdata,event.ydata]
print 'Clicked: ',self.xyguess1
elif event.inaxes is self.ax2:
self.xyguess2 = [event.xdata,event.ydata]
print 'Clicked: ',self.xyguess2
def on_scroll_cbar(self,event):
if event.inaxes is self.fig1.cbar.ax:
increment=0.05
currentClim = self.fig1.cbar.mappable.get_clim()
currentRange = currentClim[1]-currentClim[0]
if event.button == 'up':
if QtGui.QApplication.keyboardModifiers()==QtCore.Qt.ControlModifier:
newClim = (currentClim[0]+increment*currentRange,currentClim[1])
elif QtGui.QApplication.keyboardModifiers()==QtCore.Qt.NoModifier:
newClim = (currentClim[0],currentClim[1]+increment*currentRange)
if event.button == 'down':
if QtGui.QApplication.keyboardModifiers()==QtCore.Qt.ControlModifier:
newClim = (currentClim[0]-increment*currentRange,currentClim[1])
elif QtGui.QApplication.keyboardModifiers()==QtCore.Qt.NoModifier:
newClim = (currentClim[0],currentClim[1]-increment*currentRange)
self.fig1.cbar.mappable.set_clim(newClim)
self.fig1.canvas.draw()
elif event.inaxes is self.fig2.cbar.ax:
increment=0.05
currentClim = self.fig2.cbar.mappable.get_clim()
currentRange = currentClim[1]-currentClim[0]
if event.button == 'up':
if QtGui.QApplication.keyboardModifiers()==QtCore.Qt.ControlModifier:
newClim = (currentClim[0]+increment*currentRange,currentClim[1])
elif QtGui.QApplication.keyboardModifiers()==QtCore.Qt.NoModifier:
newClim = (currentClim[0],currentClim[1]+increment*currentRange)
if event.button == 'down':
if QtGui.QApplication.keyboardModifiers()==QtCore.Qt.ControlModifier:
newClim = (currentClim[0]-increment*currentRange,currentClim[1])
elif QtGui.QApplication.keyboardModifiers()==QtCore.Qt.NoModifier:
newClim = (currentClim[0],currentClim[1]-increment*currentRange)
self.fig2.cbar.mappable.set_clim(newClim)
self.fig2.canvas.draw()
def connect(self):
self.cid1 = self.fig1.canvas.mpl_connect('button_press_event', self.on_click)
self.fig1.cbar = self.fig1.colorbar(self.handleMatshow1)
cid1 = self.fig1.canvas.mpl_connect('scroll_event', self.on_scroll_cbar)
self.cid2 = self.fig2.canvas.mpl_connect('button_press_event', self.on_click)
self.fig2.cbar = self.fig2.colorbar(self.handleMatshow2)
cid2 = self.fig2.canvas.mpl_connect('scroll_event', self.on_scroll_cbar)
def getUserObjectGuess(images, norm = None):
'''
This is designed to allow the user to look at two frames, determine whether they contain matching stars,
    and if so click on their positions within the images.
'''
flagList = np.array([1,1])
xyguess1 = [0,0]
xyguess2 = [0,0]
image1 = images[0]
image2 = images[1]
map = MouseMonitor()
map.fig1 = plt.figure(1)
map.ax1 = map.fig1.add_subplot(111)
map.ax1.set_title('Star Position Guess')
map.handleMatshow1 = map.ax1.matshow(image1,cmap = mpl.cm.gnuplot2, origin = 'lower', norm=norm)
map.fig2 = plt.figure(2)
map.ax2 = map.fig2.add_subplot(111)
map.ax2.set_title('Star Position Guess')
map.handleMatshow2 = map.ax2.matshow(image2,cmap = mpl.cm.gnuplot2, origin = 'lower', norm=norm)
map.connect()
plt.show()
try:
xyguess1 = map.xyguess1
print 'Guess1 = ' + str(xyguess1)
flagList[0]=0
except AttributeError:
pass
try:
xyguess2 = map.xyguess2
print 'Guess2 = ' + str(xyguess2)
flagList[1]=0
except AttributeError:
pass
xyguesses = np.array([xyguess1,xyguess2])
return xyguesses, flagList
def ObjectFinder(frames, data, RA = None, Dec = None, radiusOfSearch=10, usePsfFit=False):
'''
    This allows the user to determine the exact position of a star within a given frame, given that
    the star exists in two frames. Two images pop up; if they contain the same star, click on it in both
    images, and the exact pixel position will be determined via centroiding. Some parts are fudged for now.
Inputs:
frames - This is an array of images corresponding to the various frames used for the mosaic. note that
the way this is written as of now requires that the first and last frame include the same star.
I think this is typical so it should not be a problem.
data - this is an array of dictionaries corresponding to each frame. This is generated from
getFrameDict() which I have included in the ObsFileSeq class. This is required.
RA/Dec - these are not used as of now.
Outputs:
all the parameters that are needed for setRm()
Ben also suggested that a cross-correlating technique may be useful for matching frames with no star in them.
What do you guys think? I need to look into it more but I believe that this code can be expanded to also do
cross-correlation. - Neil
'''
offsRA = []
offsDec = []
iframe = []
meanTime = []
for i in range(len(data)):
offsRA.append(data[i]["offsRA"])
offsDec.append(data[i]["offsDec"])
iframe.append(data[i]["iframe"])
meanTime.append(data[i]["meanTime"])
offsRA = np.array(offsRA)
offsDec = np.array(offsDec)
iframe = np.array(iframe)
meanTime = np.array(meanTime)
dpp = []
ang = []
for i in range(1, len(frames)):
images = np.array([frames[i-1], frames[i]])
oRA = np.array([offsRA[i-1], offsRA[i]])
oDec = np.array([offsDec[i-1], offsDec[i]])
print 'Looking for matching Stars...'
print 'frame: ', iframe[i-1], 'Offset RA: ', offsRA[i-1], 'Offset Dec: ', offsDec[i-1]
print 'frame: ', iframe[i], 'Offset RA: ', offsRA[i], 'Offset Dec: ', offsDec[i]
xyguesses, flagList = getUserObjectGuess(images)
if flagList[1]==0 and flagList[0]==0:
print 'match found! - determining centroid positions'
#xycenter1, flag1
cenDict1 = cc.centroidImage(images[1], xyguesses[1], radiusOfSearch=radiusOfSearch, doDS9=False, usePsfFit=usePsfFit)
#xycenter0, flag0
cenDict0 = cc.centroidImage(images[0], xyguesses[0], radiusOfSearch=radiusOfSearch, doDS9=False, usePsfFit=usePsfFit)
print 'Success! Matching stars at: ', cenDict0['xycenter'], cenDict1['xycenter']
#rc1 = np.array(xycenter1)
#rc0 = np.array(xycenter0)
rc1 = np.array(cenDict1['xycenter'])
rc0 = np.array(cenDict0['xycenter'])
dCol = rc1[0] - rc0[0]
dRow = rc1[1] - rc0[1]
dPix = math.sqrt(((rc1-rc0)**2).sum())
            #center ra,dec of frame calculated from offsets
dRA = oRA[1]-oRA[0] #arcseconds
dDec = oDec[1]-oDec[0]
dDeg = math.sqrt(dRA**2+dDec**2)/3600 #degrees
degPerPix = dDeg/dPix #plate scale
#rotation
            thetaPix = math.atan2(dCol,dRow) #angle from vertical
thetaSky = math.atan2(dRA, dDec) #angle from north
            theta = thetaPix-thetaSky #radians
dpp.append(degPerPix)
ang.append(theta)
elif flagList[1]==1 or flagList[0]==1:
print 'no star found'
dpp = np.array(dpp)
#print dpp
degPerPix = np.mean(dpp)
#print degPerPix
ang = np.array(ang)
print ang
theta = np.mean(ang)
print theta
## Pick two frames where the ra,dec offset is zero,
# usually the beginning and ending frames
print 'Matching stars from the first and last frames'
images = [frames[0], frames[-1]]
print 'frame: ', iframe[0], 'Offset RA: ', offsRA[0], 'Offset Dec: ', offsDec[0]
print 'frame: ', iframe[-1], 'Offset RA: ', offsRA[-1], 'Offset Dec: ', offsDec[-1]
xyguesses, flagList = getUserObjectGuess(images)
#xycenter1, flag1
cenDict1 = cc.centroidImage(images[1], xyguesses[1], radiusOfSearch=radiusOfSearch, doDS9=False, usePsfFit=usePsfFit)
#xycenter0, flag0
cenDict0 = cc.centroidImage(images[0], xyguesses[0], radiusOfSearch=radiusOfSearch, doDS9=False, usePsfFit=usePsfFit)
print 'Success! Matching stars at: ', cenDict0['xycenter'], cenDict1['xycenter']
#start here
rcA = np.array(cenDict0['xycenter'])
rcB = np.array(cenDict1['xycenter'])
sct = math.cos(theta)*degPerPix
sst = math.sin(theta)*degPerPix
# This rotation matrix converts from row,col to ra,dec in degrees
rm = np.array([[sct,-sst],[sst,sct]])
rdA = rm.dot(rcA)
rdB = rm.dot(rcB)
deltaRa = rdB[0]-rdA[0]
deltaTime = meanTime[-1] - meanTime[0]
raArcsecPerSec = 3600*deltaRa/deltaTime
return degPerPix, theta, raArcsecPerSec
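# --- Hedged sketch, not part of the original pipeline ----------------------------------
# The ObjectFinder docstring above suggests that a cross-correlation technique could
# match frames that contain no star. The helper below is one minimal way to do that
# with the numpy already imported in this module: it cross-correlates two equally
# sized frames via the FFT and reads the relative shift off the correlation peak.
# The function name and the wrap-around handling are illustrative assumptions, not
# part of the ARCONS API.
def estimateOffsetByCrossCorrelation(frame0, frame1):
    f0 = np.nan_to_num(np.asarray(frame0, dtype=float))
    f1 = np.nan_to_num(np.asarray(frame1, dtype=float))
    # subtract the mean so the peak is driven by image structure rather than total flux
    f0 = f0 - f0.mean()
    f1 = f1 - f1.mean()
    # circular cross-correlation via the Fourier transform
    corr = np.fft.ifft2(np.fft.fft2(f0) * np.conj(np.fft.fft2(f1))).real
    peakRow, peakCol = np.unravel_index(np.argmax(corr), corr.shape)
    # fold shifts past the midpoint back to negative values
    nRow, nCol = f0.shape
    dRow = peakRow if peakRow <= nRow // 2 else peakRow - nRow
    dCol = peakCol if peakCol <= nCol // 2 else peakCol - nCol
    # the sign convention (which frame is shifted relative to which) should be checked
    # against the row,col -> RA,Dec convention used in ObjectFinder before relying on it
    return dRow, dCol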
| gpl-2.0 |
chraibi/ptr5parser | scripts/plot_framewise.py | 1 | 1382 | # plots the trajectories framewise and produces png files
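# Input: a whitespace-separated trajectory file. Based on the column indices used
# below, column 1 is the frame number and columns 2-4 are x, y, z; column 0 is
# assumed to be the pedestrian id (it is not used in this script).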
from sys import argv
import os
import glob
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
if len(argv) <= 1:
print("usage: %s <filename>"%argv[0])
exit(">>>>exit<<<<")
filename = argv[1]
print("load file %s..."%filename)
D = np.loadtxt(filename)
frames = np.unique(D[:, 1])
xmin = np.min(D[:, 2])
xmax = np.max(D[:, 2])
ymin = np.min(D[:, 3])
ymax = np.max(D[:, 3])
fig = plt.figure()
z = np.sort(D[:, 4])
_3D = 0
if z[0] != z[-1]:
ax = fig.add_subplot(111, projection='3d')
print("3D")
_3D = 1
for frame in frames:
d = D[D[:, 1] == frame]
x = d[:, 2]
y = d[:, 3]
if _3D:
z = d[:, 4]
plt.plot(x, y, z, "o", color='b', ms=10)
else:
plt.plot(x, y, "o", color='b', ms=10)
plt.xlim([xmin-0.5, xmax+0.5])
plt.ylim([ymin-0.5, ymax+0.5])
plt.savefig("%.5d.png"%frame)
print("%.5d.png"%frame)
plt.clf()
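# Assemble the per-frame pngs into demo.mpg. Flag summary (standard ffmpeg options):
# -framerate 16 reads the png sequence at 16 fps, -pattern_type glob matches every
# *.png in the working directory, -r 25 resamples the output to 25 fps, and the
# result is encoded as MPEG-1 video with MP2 audio settings.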
make_movie = r'ffmpeg -y -framerate 16 -pattern_type glob -i "*.png" -r 25 -f mpeg -vcodec mpeg1video -ar 48000 -b:v 5000k -b:a 128k -acodec mp2 -ar 44100 -ac 1 demo.mpg'
err = os.system(make_movie)
if err != 0:
    print("making the movie failed (is ffmpeg installed?). err = %d" % err)
png_files = glob.glob("*.png")
if png_files:
print("remove png files")
for f in png_files:
os.remove(f)
| lgpl-3.0 |
henridwyer/scikit-learn | sklearn/preprocessing/tests/test_data.py | 14 | 37957 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
# np.log(1e-5) is taken because of its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
    # with 2 more samples, the std computation runs into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
    X[:, 0] = 1.0 # first feature is a constant, non-zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
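    # The expected values below are consistent with zero-variance columns keeping a
    # unit scale (so new values only get shifted by the fitted minimum), while the
    # third column is mapped by (x - (-0.1)) / 1.2, giving 0.5, 0.083 and 1.333.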
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
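    # Worked example (assuming numpy's linear interpolation): the third column of X is
    # [0.5, -0.1, 1.1], so its median is 0.5, q25 = 0.2, q75 = 0.8 and IQR = 0.6;
    # RobustScaler therefore maps x -> (x - 0.5) / 0.6, giving [0.0, -1.0, 1.0] as in
    # X_expected below.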
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
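    # The largest absolute value in the third column of X is 1.5, so MaxAbsScaler
    # divides that column by 1.5: [0.5, -0.3, 1.5, 0.0] -> [1/3, -0.2, 1.0, 0.0].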
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='max', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_maxs = X_norm.max(axis=1)
        for i in range(3):
            assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
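    # n_values is inferred as max + 1 per column (4, 3 and 2), i.e. 9 indicator
    # columns in total; only the 5 value/column pairs actually seen in X are kept
    # as active features, hence the shape (2, 5) above.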
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
    # Raise error if handle_unknown is neither 'ignore' nor 'error'.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
ywcui1990/nupic.research | projects/nlp/run_tm_learning.py | 11 | 5601 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Script to run temporal memory on NLP documents
"""
import argparse
from textwrap import TextWrapper
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
import nupic
from nupic.data.file_record_stream import FileRecordStream
from htmresearch.frameworks.nlp.classification_model import ClassificationModel
from htmresearch.frameworks.nlp.model_factory import (
createModel, getNetworkConfig)
from htmresearch.support.csv_helper import readDataAndReshuffle
plt.ion()
wrapper = TextWrapper(width=100)
def getTMRegion(network):
tmRegion = None
for region in network.regions.values():
regionInstance = region
if type(regionInstance.getSelf()) is nupic.regions.TPRegion.TPRegion:
tmRegion = regionInstance.getSelf()
return tmRegion
def instantiateModel(args):
"""
Return an instance of the model we will use.
"""
# Some values of K we know work well for this problem for specific model types
kValues = {"keywords": 21, "docfp": 3}
# Create model after setting specific arguments required for this experiment
args.networkConfig = getNetworkConfig(args.networkConfigPath)
args.k = kValues.get(args.modelName, 1)
args.numLabels = 2
model = createModel(**vars(args))
return model
def trainModel(args, model, trainingData, labelRefs):
"""
Train the given model on trainingData. Return the trained model instance.
"""
tmRegion = getTMRegion(model.network)
print
print "=======================Training model on sample text================"
for recordNum, doc in enumerate(trainingData):
document = doc[0]
labels = doc[1]
docId = doc[2]
if args.verbosity > 0:
print
print "Document=", wrapper.fill(document)
print "label=", labelRefs[labels[0]], "id=", docId
model.trainDocument(document, labels, docId)
numActiveCols = tmRegion._tfdr.mmGetTraceActiveColumns().makeCountsTrace().data
numPredictedActiveCells = \
tmRegion._tfdr.mmGetTracePredictedActiveCells().makeCountsTrace().data
if args.verbosity > 0:
print "Word # %s, Avg Active Cols # %s, Avg predicted-active cell # %s " % (
len(numActiveCols),
np.mean(np.array(numActiveCols)),
np.mean(np.array(numPredictedActiveCells))
)
tmRegion._tfdr.mmClearHistory()
return model
def runExperiment(args):
"""
Create model according to args, train on training data, save model,
restore model, test on test data.
"""
args.numLabels = 2
(trainingData, labelRefs, documentCategoryMap,
documentTextMap) = readDataAndReshuffle(args)
# Create model
model = instantiateModel(args)
model = trainModel(args, model, trainingData, labelRefs)
# TODO: Visualize prediction quality
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("-c", "--networkConfigPath",
default="data/network_configs/tm_knn_4k_retina.json",
help="Path to JSON specifying the network params.",
type=str)
parser.add_argument("-m", "--modelName",
default="htm",
type=str,
help="Name of model class. Options: [keywords,htm,docfp]")
parser.add_argument("--retinaScaling",
default=1.0,
type=float,
help="Factor by which to scale the Cortical.io retina.")
parser.add_argument("--retina",
default="en_associative_64_univ",
type=str,
help="Name of Cortical.io retina.")
parser.add_argument("--apiKey",
default=None,
type=str,
help="Key for Cortical.io API. If not specified will "
"use the environment variable CORTICAL_API_KEY.")
parser.add_argument("--modelDir",
default="MODELNAME.checkpoint",
help="Model will be saved in this directory.")
parser.add_argument("-v", "--verbosity",
default=1,
type=int,
help="verbosity 0 will print out experiment steps, "
"verbosity 1 will include results, and verbosity > "
"1 will print out preprocessed tokens and kNN "
"inference metrics.")
args = parser.parse_args()
# By default set checkpoint directory name based on model name
if args.modelDir == "MODELNAME.checkpoint":
args.modelDir = args.modelName + ".checkpoint"
print "Save dir: ", args.modelDir
runExperiment(args)
| agpl-3.0 |
hugobowne/scikit-learn | sklearn/utils/testing.py | 4 | 27143 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# Giorgio Patrini
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import struct
import scipy as sp
import scipy.io
from functools import wraps
from operator import itemgetter
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
from numpy.testing import assert_approx_equal
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
from sklearn.cluster import DBSCAN
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal",
"assert_approx_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but lets keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
            raise AssertionError("%s did not give warning: %s (got %s)"
% (func.__name__, warning_class, w))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
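# Usage sketch for assert_warns_message (illustrative only; _noisy_func and
# the warning text are made up for the example):
#
#     def _noisy_func():
#         warnings.warn("alpha is deprecated", DeprecationWarning)
#         return 42
#
#     result = assert_warns_message(DeprecationWarning, "deprecated",
#                                   _noisy_func)
#     assert result == 42  # the wrapped function's return value is passed back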
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
    Note: using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
    exceptions : exception or tuple of exception
        The exception class (or tuple of classes) expected to be raised.
    message : str
        A substring that must appear in the raised error message.
    function : callable
        Callable object expected to raise the error.
    *args : the positional arguments to `function`.
    **kwargs : the keyword arguments to `function`.
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
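# Usage sketch for assert_raise_message (illustrative only; _check_alpha is a
# made-up callable for the example):
#
#     def _check_alpha(alpha):
#         if alpha < 0:
#             raise ValueError("alpha must be positive, got %r" % alpha)
#
#     assert_raise_message(ValueError, "must be positive", _check_alpha, -1)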
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
    This function transposes all arrays, while fetch_mldata only transposes
    'data'; keep that in mind in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
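# Usage sketch for the mldata mocking helpers (illustrative only; the dataset
# name, column names and shapes are made up, and the keys expected by the code
# under test depend on the dataset being faked):
#
#     mock_iris = {'data': np.arange(12).reshape(4, 3), 'label': np.arange(4)}
#     install_mldata_mock({'iris': (mock_iris, ['label', 'data'])})
#     try:
#         pass  # code under test calling sklearn.datasets.fetch_mldata('iris')
#     finally:
#         uninstall_mldata_mock()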
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier", "MultiOutputRegressor",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV",
"SelectFromModel"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
        BaseEnsemble, OneVsOneClassifier, MultiOutputRegressor,
        OutputCodeClassifier, OneVsRestClassifier, RFE, RFECV.
    include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
        not be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion, GridSearchCV, RandomizedSearchCV and
        SelectFromModel.
include_dont_test : boolean, default=False
        Whether to include "special" estimators such as label processors that
        are excluded from the common tests by default.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if (".tests." in modname):
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
# itemgetter is used to ensure the sort does not extend to the 2nd item of
# the tuple
return sorted(set(estimators), key=itemgetter(0))
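# Usage sketch for all_estimators (illustrative only):
#
#     for name, Estimator in all_estimators(type_filter='classifier'):
#         print(name)  # e.g. 'LogisticRegression'
#
#     # type_filter may also be a list, e.g. ['classifier', 'regressor']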
def set_random_state(estimator, random_state=0):
"""Set random state of an estimator if it has the `random_state` param.
Classes for whom random_state is deprecated are ignored. Currently DBSCAN
is one such class.
"""
if isinstance(estimator, DBSCAN):
return
if "random_state" in estimator.get_params():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def skip_if_32bit(func):
"""Test decorator that skips tests on 32bit platforms."""
@wraps(func)
def run_test(*args, **kwargs):
bits = 8 * struct.calcsize("P")
if bits == 32:
raise SkipTest('Test skipped on 32bit platforms.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed"
" in 0.19: use the safer and more generic"
" if_safe_multiprocessing_with_blas instead",
DeprecationWarning)
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def if_safe_multiprocessing_with_blas(func):
"""Decorator for tests involving both BLAS calls and multiprocessing
Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction with
some implementation of BLAS (or other libraries that manage an internal
posix thread pool) can cause a crash or a freeze of the Python process.
In practice all known packaged distributions (from Linux distros or
    Anaconda) of BLAS under Linux seem to be safe, so this problem seems to
    only impact OSX users.
    This wrapper makes it possible to skip tests that can possibly cause
    this crash under OS X.
Under Python 3.4+ it is possible to use the `forkserver` start method
for multiprocessing to avoid this issue. However it can cause pickling
    errors on interactively defined functions. It is therefore not enabled by
    default.
"""
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'darwin':
raise SkipTest(
"Possible multi-process bug with some BLAS")
return func(*args, **kwargs)
return run_test
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independence)"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
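# Usage sketch for TempMemmap (illustrative only): the context manager dumps
# the data into a temporary folder with joblib and yields a read-only,
# memory-mapped view, which is useful for testing read-only input handling:
#
#     X = np.random.RandomState(0).rand(20, 5)
#     with TempMemmap(X) as X_readonly:
#         pass  # X_readonly is backed by a temp file (mmap_mode='r')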
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
Gorbagzog/StageIAP | MCMC_SHMR_Candels.py | 1 | 25461 | #!/usr/bin/env python3
# -*-coding:Utf-8 -*
"""Use MCMC to find the stellar mass halo mass relation.
Based on the Behroozi et al 2010 paper.
Use a parametrization of the SHMR, plus a given HMF to find the expected SMF and compare it
to the observed SMF with its uncertainties using a likelihod maximisation.
Started on december 18th by Louis Legrand at IAP and IAS.
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import emcee
from astropy.cosmology import LambdaCDM
import scipy.optimize as op
from scipy import signal
import corner
from getdist import plots, MCSamples
import time
def load_smf():
# Code is copied from IaryDavidzonSMF.py as of 12 june
# redshifts of the Candels+15 data
global redshifts
global numzbin
redshifts = np.array([3.5, 4.5, 5.5, 6.5, 7.5])
numzbin = np.size(redshifts)-1
global smf_candels
smf_candels = []
for i in range(numzbin):
smf_candels.append(np.loadtxt(
# Select the SMFs to use : JEWELS or v2
# '../Data/Candels/grazian15_68CL_z' + str(i+4) + '_JEWELS.txt')
'../Data/Candels/grazian15_68CL_v2_z' + str(i+4) + '.txt')
)
"""Adapt SMF to match the Bolshoi-Planck Cosmology"""
    # Bolshoi-Planck cosmo : (flat LCDM)
# Om = 0.3089, Ol = 0.6911, Ob = 0.0486, h = 0.6774, s8 = 0.8159, ns = 0.9667
BP_Cosmo = LambdaCDM(H0=67.74, Om0=0.3089, Ode0=0.6911)
# CANDELS+17 SMF cosmo : (flat LCDM) (same as Davidzon17_COSMO)
# Om = 0.3, Ol = 0.7, h=0.7
D17_Cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7)
for i in range(numzbin):
# Correction of the comoving Volume :
VmaxD17 = D17_Cosmo.comoving_volume(redshifts[i+1]) - D17_Cosmo.comoving_volume(redshifts[i])
VmaxBP = BP_Cosmo.comoving_volume(redshifts[i+1]) - BP_Cosmo.comoving_volume(redshifts[i])
# VmaxD17 = get_Vmax_mod.main(redshifts[i], redshifts[i+1], cosmo=[70, 0.3, 0.7])
# VmaxBP = get_Vmax_mod.main(redshifts[i], redshifts[i+1], cosmo=[67.74, 0.3089, 0.6911])
# Add the log, equivalent to multiply by VmaxD17/VmaxBP
smf_candels[i][:, 1] = smf_candels[i][:, 1] + np.log10(VmaxD17/VmaxBP)
smf_candels[i][:, 2] = smf_candels[i][:, 2] + np.log10(VmaxD17/VmaxBP)
smf_candels[i][:, 3] = smf_candels[i][:, 3] + np.log10(VmaxD17/VmaxBP)
# Correction of the measured stellar mass
# Equivalent to multiply by (BP_Cosmo.H0/D17_Cosmo.H0)**-2
smf_candels[i][:, 0] = smf_candels[i][:, 0] - 2 * np.log10(BP_Cosmo.H0/D17_Cosmo.H0)
def load_hmf():
"""Load HMF from Bolshoi Planck simulation"""
# redshifts of the BolshoiPlanck files
redshift_haloes = np.arange(0, 10, step=0.1)
numredshift_haloes = np.size(redshift_haloes)
"""Definition of hmf_bolshoi columns :
hmf_bolshoi[redshift][:,0] = Log10(mass) [Msun]
hmf_bolshoi[redshift][:,1] = Log10(cen_mf), ie central haloes mass function
(density) [1/Mpc^3]
hmf_bolshoi[redshift][:,2] = Log10(all_macc_mf), ie all haloes mass function
(density) [1/Mpc^3]
"""
global hmf_bolshoi
hmf_bolshoi_tot = []
for i in range(numredshift_haloes):
hmf_bolshoi_tot.append(
np.loadtxt('../Data/HMFBolshoiPlanck/mf_planck/mf_planck_z' +
                       '{:4.3f}'.format(redshift_haloes[i]) + '_mvir.dat'))
    """Select the redshift slices that match the slices of Iary"""
global redshift_id_selec
redshift_id_selec = np.empty(numzbin)
for i in range(numzbin):
redshift_id_selec[i] = np.argmin(
np.abs(redshift_haloes - (redshifts[i] + redshifts[i + 1]) / 2))
redshift_id_selec = redshift_id_selec.astype(int)
print('Redshifts of Iari SMFs : ' + str((redshifts[:-1] + redshifts[1:]) / 2))
print('Closest redshifts for Bolshoi HMFs : '
+ str(redshift_haloes[redshift_id_selec]))
hmf_bolshoi = []
for i in redshift_id_selec:
        hmf_bolshoi.append(hmf_bolshoi_tot[i])
"""Function definitions for computation of the theoretical SMF phi_true"""
def logMh(logMs, M1, Ms0, beta, delta, gamma):
# SM-HM relation
return M1 + beta*(logMs - Ms0) + (10 ** (delta * (logMs - Ms0))) / (1 + (10 ** (-gamma * (logMs - Ms0)))) - 0.5
# Ms = 10**logMs
# logMh = M1 + beta * np.log10(Ms / 10**Ms0) + (Ms / 10**Ms0)**delta / (1 + (Ms / 10**Ms0)**(-gamma)) - 0.5
# return logMh
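# Quick sanity check of the SHMR parametrization above (illustrative parameter
# values, not fit results): with M1=12.5, Ms0=10.8, beta=0.5, delta=0.5 and
# gamma=2.5, a stellar mass logMs=10.5 maps to
#     logMh(10.5, 12.5, 10.8, 0.5, 0.5, 2.5)  # ~ 11.96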
def log_phi_direct(logMs, idx_z, M1, Ms0, beta, delta, gamma):
# SMF obtained from the SM-HM relation and the HMF
epsilon = 0.0001
log_Mh1 = logMh(logMs, M1, Ms0, beta, delta, gamma)
log_Mh2 = logMh(logMs + epsilon, M1, Ms0, beta, delta, gamma)
# print(logMs)
# print(log_Mh1, log_Mh2)
# index_Mh = np.argmin(np.abs(hmf_bolshoi[idx_z][:, 0] - log_Mh1))
# Select the index of the HMF corresponing to the halo masses
index_Mh = np.argmin(
np.abs(
np.tile(hmf_bolshoi[idx_z][:, 0], (len(log_Mh1), 1)) -
np.transpose(np.tile(log_Mh1, (len(hmf_bolshoi[idx_z][:, 0]), 1)))
), axis=1)
# print(np.tile(hmf_bolshoi[idx_z][:, 0], (len(log_Mh1), 1)))
# print(np.transpose(np.tile(log_Mh1, (len(hmf_bolshoi[idx_z][:, 0]), 1))))
log_phidirect = hmf_bolshoi[idx_z][index_Mh, 2] + np.log10((log_Mh2 - log_Mh1)/epsilon)
# print(np.log10((log_Mh2 - log_Mh1)/epsilon))
# Keep only points where the halo mass is defined in the HMF
log_phidirect[log_Mh1 > hmf_bolshoi[idx_z][-1, 0]] = -1000
log_phidirect[log_Mh1 < hmf_bolshoi[idx_z][0, 0]] = -1000
# print(log_phidirect)
# print(hmf_bolshoi[idx_z][index_Mh, 2])
# print(log_phidirect)
return log_phidirect
def log_phi_true(logMs, idx_z, M1, Ms0, beta, delta, gamma, ksi):
# Use the approximation of the convolution defined in Behroozi et al 2010 equation (3)
epsilon = 0.01 * logMs
logphi1 = log_phi_direct(logMs, idx_z, M1, Ms0, beta, delta, gamma)
logphi2 = log_phi_direct(logMs + epsilon, idx_z, M1, Ms0, beta, delta, gamma)
logphitrue = logphi1 + ksi**2 / 2 * np.log(10) * ((logphi2 - logphi1)/epsilon)**2
return logphitrue
# def phi_expect(z1, z2, logMs, M1, Ms0, beta, delta, gamma, ksi):
# # Take into account that the observed SMF is for a range of redshift
# numpoints = 10
# redshifts = np.linspace(z1, z2, num=numpoints)
# top = 0
# bot = 0
# for i in range(numpoints - 1):
# dVc = BP_Cosmo.comoving_volume(redshifts[i+1]) - BP_Cosmo.comoving_volume(redshifts[i])
# top += phi_true(redshifts[i], logMs, M1, Ms0, beta, delta, gamma, ksi) * dVc
# bot += dVc
# return top/bot
def chi2(idx_z, M1, Ms0, beta, delta, gamma, ksi):
# return the chi**2 between the observed and the expected SMF
# select = np.where(np.logical_and(
# smf_candels[idx_z][:, 1] > -6, # select points where the smf is defined
# smf_candels[idx_z][:, 3] < 900))[0] # select points where the error bar is defined
select = np.where(smf_candels[idx_z][:, 1] > -6) # select points where the smf is defined
    # We choose to limit the fit only for abundances higher than 10**-6
logMs = smf_candels[idx_z][select, 0]
pred = log_phi_true(logMs, idx_z, M1, Ms0, beta, delta, gamma, ksi)
chi2 = np.sum(
((pred -
#smf_candels[idx_z][select, 1]) / ((smf_candels[idx_z][select, 3] + smf_candels[idx_z][select, 2])/2))**2
smf_candels[idx_z][select, 1]) / smf_candels[idx_z][select, 2])**2
)
return chi2
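# The statistic above is the usual chi-square,
#     chi2 = sum_i ((log10 phi_pred(Ms_i) - log10 phi_obs(Ms_i)) / sigma_i)**2,
# with sigma_i taken from the uncertainty column of the observed SMF
# (illustrative notation; the column conventions are those set in load_smf).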
def chi2_noksi(idx_z, M1, Ms0, beta, delta, gamma):
# return the chi**2 between the observed and the expected SMF
# select = np.where(np.logical_and(
# smf_candels[idx_z][:, 1] > -6, # select points where the smf is defined
# smf_candels[idx_z][:, 3] < 900))[0] # select points where the error bar is defined
select = np.where(smf_candels[idx_z][:, 1] > -10) # select points where the smf is defined
    # We choose to limit the fit only for abundances higher than 10**-6
logMs = smf_candels[idx_z][select, 0]
pred = log_phi_direct(logMs, idx_z, M1, Ms0, beta, delta, gamma)
chi2 = np.sum(
((pred -
# When using the VmaxFit2D (give the bands and not the sigma)
smf_candels[idx_z][select, 1]) / ((smf_candels[idx_z][select, 3] - smf_candels[idx_z][select, 2])/2))**2
)
# print( (pred - smf_candels[idx_z][select, 1]) / ((smf_candels[idx_z][select, 3] + smf_candels[idx_z][select, 2])/2))
return chi2
# def loglike(theta, idx_z):
# # return the likelihood
# # print(theta)
# # bouds for the idx_z = 0
# M1, Ms0, beta, delta, gamma, ksi = theta[:]
# if beta < 0.1 or delta < 0.1 or gamma < 0:
# return -np.inf
# if beta > 1 or delta > 1 or gamma > 3:
# return -np.inf
# if M1 < 11 or M1 > 13 or Ms0 < 10 or Ms0 > 12:
# return -np.inf
# if ksi < 0 or ksi > 1:
# return -np.inf
# else:
# return -chi2(idx_z, M1, Ms0, beta, delta, gamma, ksi)/2
def loglike(theta, idx_z):
# return the likelihood
# bounds for the idx_z = 1
M1, Ms0, beta, delta, gamma, ksi = theta[:]
if beta < 0.3 or delta < 0.5 or gamma < 1:
return -np.inf
if beta > 0.6 or delta > 0.7 or gamma > 2.7:
return -np.inf
if M1 < 12 or M1 > 13 or Ms0 < 10 or Ms0 > 12:
return -np.inf
if ksi < 0 or ksi > 1:
return -np.inf
else:
return -chi2(idx_z, M1, Ms0, beta, delta, gamma, ksi)/2
def loglike_noksi(theta, idx_z):
# return the likelihood for a fixed ksi
# print(theta)
M1, Ms0, beta, delta, gamma = theta[:]
if idx_z < 5 :
if beta < 0 or delta < 0 or gamma < 0:
return -np.inf
if beta > 1 or delta > 1 or gamma > 5:
return -np.inf
if M1 < 11 or M1 > 13.2 or Ms0 < 10 or Ms0 > 12:
return -np.inf
else:
return -chi2_noksi(idx_z, M1, Ms0, beta, delta, gamma)/2
elif idx_z == 5 :
if beta < 0 or delta < 0 or gamma < 0:
return -np.inf
if beta > 1 or delta > 1 or gamma > 5:
return -np.inf
if M1 < 11.5 or M1 > 14 or Ms0 < 11 or Ms0 > 12:
return -np.inf
else:
return -chi2_noksi(idx_z, M1, Ms0, beta, delta, gamma)/2
elif idx_z == 6 :
if beta < 0 or delta < 0 or gamma < 0:
return -np.inf
if beta > 1 or delta > 1 or gamma > 5:
return -np.inf
if M1 < 12 or M1 > 15 or Ms0 < 10 or Ms0 > 13:
return -np.inf
else:
return -chi2_noksi(idx_z, M1, Ms0, beta, delta, gamma)/2
elif idx_z > 6 :
if beta < 0 or delta < 0 or gamma < 0:
return -np.inf
if beta > 1 or delta > 1 or gamma > 5:
return -np.inf
if M1 < 12 or M1 > 15 or Ms0 < 10 or Ms0 > 15:
return -np.inf
else:
return -chi2_noksi(idx_z, M1, Ms0, beta, delta, gamma)/2
def negloglike(theta, idx_z):
return -loglike(theta, idx_z)
def negloglike_noksi(theta, idx_z):
return -loglike_noksi(theta, idx_z)
"""Find maximum likelihood estimation"""
def maxlikelihood(idx_z, theta0, bounds):
load_smf()
load_hmf()
# idx_z = 0
# theta0 = np.array([11, 10, 0.1, 0.1, 1])
# bounds = ((11, 13), (10, 12), (0, 1), (0, 1), (0, 3), (0, 1))
# results = op.minimize(negloglike_noksi, theta0, bounds=bounds, args=(idx_z), method='TNC')
# results = op.basinhopping(negloglike, theta0, niter=1, T=1000, minimizer_kwargs={'args': idx_z})
# results = op.minimize(negloglike_noksi, theta0, args=(idx_z), method='Nelder-Mead', options={'fatol':10**-6})
results = op.minimize(negloglike, theta0, args=(idx_z), method='Nelder-Mead', options={'fatol':10**-6})
print(results)
"""Plots"""
def plotSMF_noksi(idx_z, iterations, burn):
load_smf()
load_hmf()
chain = np.load("../MCMC_Candels/Chain/Chain_noksi_z" + str(idx_z) + "_niter=" + str(iterations) + ".npy")
samples = chain[:, burn:, :].reshape((-1, chain.shape[2]))
select = np.where(np.logical_and(
smf_candels[idx_z][:, 1] > -10, # select points where the smf is defined
smf_candels[idx_z][:, 3] < 900))[0] # select points where the error bar is defined
logMs = smf_candels[idx_z][select, 0]
plt.figure()
plt.errorbar(logMs, smf_candels[idx_z][select, 1],
yerr=[smf_candels[idx_z][select, 1] - smf_candels[idx_z][select, 3],
smf_candels[idx_z][select, 2] - smf_candels[idx_z][select, 1]], fmt='o')
plt.ylim(-6, 0)
for M1, Ms0, beta, delta, gamma in samples[np.random.randint(len(samples), size=100)]:
logphi = log_phi_direct(logMs, idx_z, M1, Ms0, beta, delta, gamma)
plt.plot(logMs, logphi, color="k", alpha=0.1)
# plt.show()
plt.savefig('../MCMC_Candels/Plots/SMF_noksi'+ str(idx_z) + "_niter=" + str(iterations) + '.pdf')
def plotSMF(idx_z, iterations, burn):
load_smf()
load_hmf()
chain = np.load("../MCMC_Candels/Chain/Chain_ksi_z" + str(idx_z) + "_niter=" + str(iterations) + ".npy")
samples = chain[:, burn:, :].reshape((-1, chain.shape[2]))
select = np.where(smf_candels[idx_z][:, 1] > -1000)[0]
logMs = smf_candels[idx_z][select, 0]
plt.figure()
plt.errorbar(logMs, smf_candels[idx_z][select, 1],
yerr=[smf_candels[idx_z][select, 3], smf_candels[idx_z][select, 2]], fmt='o')
plt.ylim(-6, 0)
for M1, Ms0, beta, delta, gamma, ksi in samples[np.random.randint(len(samples), size=100)]:
logphi = log_phi_true(logMs, idx_z, M1, Ms0, beta, delta, gamma, ksi)
plt.plot(logMs, logphi, color="k", alpha=0.1)
# plt.show()
plt.savefig('../MCMC_Candels/Plots/SMF_ksi' + str(idx_z) + "_niter=" + str(iterations) + '.pdf')
def plotSMHM(idx_z, iterations, burn):
load_smf()
load_hmf()
chain = np.load("../MCMC_Candels/Chain/Chain_ksi_z" + str(idx_z) + "_niter=" + str(iterations) + ".npy")
samples = chain[:, burn:, :].reshape((-1, chain.shape[2]))
logMs = np.linspace(9, 11.5, num=200)
plt.figure()
for M1, Ms0, beta, delta, gamma, ksi in samples[np.random.randint(len(samples), size=100)]:
logmhalo = logMh(logMs, M1, Ms0, beta, delta, gamma)
plt.plot(logmhalo, logMs-logmhalo, color="k", alpha=0.1)
plt.show()
plt.savefig('../MCMC_Candels/Plots/SMHM_ksi' + str(idx_z) + "_niter=" + str(iterations) + '.pdf')
def plotSMHM_noksi(idx_z, iterations, burn):
load_smf()
load_hmf()
chain = np.load("../MCMC_Candels/Chain/Chain_noksi_z" + str(idx_z) + "_niter=" + str(iterations) + ".npy")
samples = chain[:, burn:, :].reshape((-1, chain.shape[2]))
logMs = np.linspace(9, 11.5, num=200)
plt.figure()
for M1, Ms0, beta, delta, gamma in samples[np.random.randint(len(samples), size=100)]:
logmhalo = logMh(logMs, M1, Ms0, beta, delta, gamma)
plt.plot(logmhalo, logMs-logmhalo, color="k", alpha=0.1)
# plt.show()
plt.savefig('../MCMC_Candels/Plots/SMHM_noksi'+ str(idx_z) + "_niter=" + str(iterations) + '.pdf')
def plotHMvsSM_noksi(idx_z, iterations, burn):
load_smf()
load_hmf()
plt.close('all')
chain = np.load("../MCMC_Candels/Chain/Chain_noksi_z" + str(idx_z) + "_niter=" + str(iterations) + ".npy")
samples = chain[:, burn:, :].reshape((-1, chain.shape[2]))
logMs = np.linspace(9, 11.5, num=200)
for M1, Ms0, beta, delta, gamma in samples[np.random.randint(len(samples), size=100)]:
logmhalo = logMh(logMs, M1, Ms0, beta, delta, gamma)
plt.plot(logMs, logmhalo, color="k", alpha=0.1)
plt.xlabel('log Ms')
plt.ylabel('log Mhalo')
# plt.show()
plt.savefig('../MCMC_Candels/Plots/HMvsSM_noksi'+ str(idx_z) + "_niter=" + str(iterations) + '.pdf')
def plotchain(chainfile, idx_z, iterations, burn):
chain = np.load(chainfile)
figname = "../MCMC_Candels/Plots/Ksi_z" + str(idx_z) + "_niter=" + str(iterations) + "_burn=" + str(burn)
samples = chain[:, burn:, :].reshape((-1, chain.shape[2]))
fig = corner.corner(
samples, labels=['$M_{1}$', '$M_{*,0}$', '$\\beta$', '$\delta$', '$\gamma$', 'ksi'])
fig.savefig(figname + ".pdf")
plt.close('all')
def plotchain_noksi(chainfile, idx_z, iterations, burn):
chain = np.load(chainfile)
figname = "../MCMC_Candels/Plots/Noksi_z" + str(idx_z) + "_niter=" + str(iterations) + "_burn=" + str(burn)
samples = chain[:, burn:, :].reshape((-1, chain.shape[2]))
fig = corner.corner(
samples, labels=['$M_{1}$', '$M_{*,0}$', '$\\beta$', '$\delta$', '$\gamma$'])
fig.savefig(figname + ".pdf")
plt.close('all')
# for (p, loglike, state) in sampler.sample(p0, iterations=iterations):
# print(p)
# print(loglike)
def plotdist(chainfile, idx_z, iterations, burn):
chain = np.load(chainfile)
figname = "../MCMC_Candels/Plots/Ksi_z" + str(idx_z) + "_niter=" + str(iterations) + "_burn=" + str(burn)
names = ['$M_{1}$', '$M_{s,0}$', '$\\beta$', '$\delta$', '$\gamma$', 'ksi']
samples = chain[:, burn:, :].reshape((-1, chain.shape[2]))
samples = MCSamples(samples = samples, names = names)
g = plots.getSubplotPlotter()
g.triangle_plot(samples, filled=True)
g.export(figname + '_gd.pdf' )
plt.clf()
def plotdist_noksi(chainfile, idx_z, iterations, burn):
chain = np.load(chainfile)
figname = "../MCMC_Candels/Plots/Noksi_z" + str(idx_z) + "_niter=" + str(iterations) + "_burn=" + str(burn)
names = ['$M_{1}$', '$M_{s,0}$', '$\\beta$', '$\delta$', '$\gamma$']
samples = chain[:, burn:, :].reshape((-1, chain.shape[2]))
samples = MCSamples(samples = samples, names = names)
g = plots.getSubplotPlotter()
g.triangle_plot(samples, filled=True)
g.export(figname + '_gd.pdf' )
plt.clf()
def plotLnprob():
for k in range(20):
plt.plot(lnprob[k, :])
def plot_Mhpeak(chainfile, idx_z, iterations, burn):
mhpeak = MhPeak(chainfile, idx_z, iterations, burn)
avg_mhpeak = np.mean(mhpeak)
std_mhpeak = np.std(mhpeak)
plt.figure()
plt.hist(mhpeak, bins=100)
plt.axvline(avg_mhpeak, color='orange')
plt.title('idx_z = ' + str(idx_z) +', MhPeak = ' + str(avg_mhpeak) + '+/-' + str(std_mhpeak))
plt.savefig('../MCMC_Candels/Plots/MhaloPeak/MhPeak_z' + str(idx_z) + '.pdf')
""" Run MCMC """
def runMCMC(idx_z, starting_point, std, iterations, burn, nthreads):
load_smf()
load_hmf()
start_time = time.time()
nwalker = 20
# nthreads = 16 # Put more for multiprocessing automatically.
# starting_point = np.array([12.5, 10.8, 0.5, 0.5, 0.5, 0.15])
# std =np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.01])
# starting_point = np.array([12.5, 11, 0.5, 0.7, 0.5, 0.15])
p0 = emcee.utils.sample_ball(starting_point, std, size=nwalker)
ndim = len(starting_point)
sampler = emcee.EnsembleSampler(nwalker, ndim, loglike, args=[idx_z], threads=nthreads)
print("ndim = " + str(ndim))
print("start = " + str(starting_point))
print("std = " + str(std))
print("iterations = " + str(iterations))
sampler.run_mcmc(p0, iterations)
chainfile = "../MCMC_Candels/Chain/Chain_ksi_z" + str(idx_z) + "_niter=" + str(iterations) + ".npy"
np.save(chainfile, sampler.chain)
print("--- %s seconds ---" % (time.time() - start_time))
plotchain(chainfile, idx_z, iterations, burn)
def runMCMC_noksi(idx_z, starting_point, std, iterations, burn, nthreads=1):
load_smf()
load_hmf()
nwalker = 20
# Put more nthreads for multiprocessing automatically.
# starting_point = np.array([12, 11, 0.5, 0.5, 2.5])
# std = np.array([1, 1, 0.1, 0.1, 0.1])
p0 = emcee.utils.sample_ball(starting_point, std, size=nwalker)
ndim = len(starting_point)
sampler = emcee.EnsembleSampler(nwalker, ndim, loglike_noksi, args=[idx_z], threads=nthreads)
    print("idx_z = " + str(idx_z))
print("ndim = " + str(ndim))
print("start = " + str(starting_point))
print("std = " + str(std))
print("iterations = " + str(iterations))
start_time = time.time()
sampler.run_mcmc(p0, iterations)
## Monitor the sampling progress
# nsteps = iterations/100
# width = 30
# for i, result in enumerate(sampler.sample(p0, iterations=nsteps)):
# n = int((width+1) * float(i) / nsteps)
# sys.stdout.write("\r[{0}{1}]".format('#' * n, ' ' * (width - n)))
# sys.stdout.write("\n")
elapsed_time = time.time() - start_time
print('Time elapsed : ' + str(elapsed_time))
savename = "../MCMC_Candels/Chain/Chain_noksi_z" + str(idx_z) + "_niter=" + str(iterations) + ".npy"
savenameln = "../MCMC_Candels/Chain/LnProb_noksi_z" + str(idx_z) + "_niter=" + str(iterations) + ".npy"
np.save(savename, sampler.chain)
np.save(savenameln, sampler.lnprobability)
plotchain_noksi(savename, idx_z, iterations, burn)
plotSMF_noksi(idx_z, iterations, burn)
plotSMHM_noksi(idx_z, iterations, burn)
plot_Mhpeak(savename, idx_z, iterations, burn)
save_results(savename, idx_z, iterations, burn)
plotHMvsSM_noksi(idx_z, iterations, burn)
def save_results(chainfile, idx_z, iterations, burn):
chain = np.load(chainfile)
# names = ['$M_{1}$', '$M_{s,0}$', '$\\beta$', '$\delta$', '$\gamma$', 'ksi']
names = ['$M_{1}$', '$M_{s,0}$', '$\\beta$', '$\delta$', '$\gamma$']
samples = chain[:, burn:, :].reshape((-1, chain.shape[2]))
samples = MCSamples(samples = samples, names = names)
res = samples.getTable()
#res.write("../MCMC_Candels/Results/Chain_ksi_z" + str(idx_z) + "_niter=" + str(iterations) + ".txt")
res.write("../MCMC_Candels/Results/Chain_Noksi_z" + str(idx_z) + "_niter=" + str(iterations) + ".txt")
def MhPeak(chainfile, idx_z, iterations, burn):
chain = np.load(chainfile)
samples = chain[:, burn:, :].reshape((-1, chain.shape[2]))
chainsize = np.shape(samples)[0]
logMs = np.linspace(8, 12, num=300)
Mhalopeak = np.zeros(chainsize)
for i in range(chainsize):
logmhalo = logMh(logMs, samples[i, 0], samples[i, 1], samples[i, 2], samples[i, 3], samples[i, 4])
Mhalopeak_idx = np.argmax(logMs - logmhalo)
Mhalopeak[i]= logmhalo[Mhalopeak_idx]
return Mhalopeak
# f = open("chain.dat", "w")
# f.close()
# for result in sampler.sample(p0, iterations=iterations):
# position = result[0]
# f = open("chain.dat", "a")
# for k in range(position.shape[0]):
# f.write("{0:4d} {1:s}\n".format(k, " ".join(position[k])))
# f.close()
# if __name__ == "__main__":
# main()
"""Plots and tests"""
# logMs = np.linspace(6, 12, num=100)
# plt.plot(logMs, logMh(logMs, 13, 10, 0.5, 0.5, 2.5))
# logmhtest =logMh(logMs, 13, 14, 0.5, 0.5, 2.5)
# plt.plot(logMh(logMs, 12, 10, 0.5, 0.5, 2.5), logMs - logMh(logMs, 12, 10, 0.5, 0.5, 2.5))
# Compare Observed and predicted SMF :
# load_smf()
# load_hmf()
# select = np.where(smf_candels[idx_z][:, 1] > -1000)[0]
# logMs = smf_candels[idx_z][select, 0]
# plt.errorbar(logMs, smf_candels[idx_z][select, 1],
# yerr=[smf_candels[idx_z][select, 3], smf_candels[idx_z][select, 2]], fmt='o')
# plt.ylim(-6, 0)
# # logphi = log_phi_direct(logMs, idx_z, 12.2, 10.8, 0.3, 0, 0.3)
# """ Leauthaud fit parameters for idx_z=0, we note a small difference maybe coming form the HMF"""
# """ Leauthaud fit parameters for idx_z=0, we note a small difference maybe coming from the HMF"""
# # # logphi = log_phi_true(logMs, idx_z, 12.52, 10.916, 0.457, 0.566, 1.53, 0.206**2)
# # logphi = log_phi_direct(logMs, idx_z, 12.518, 10.917, 0.456, 0.582, 1.48)
# """ Leauthaud fit parameters for idx_z=1 """
# # logphi = log_phi_direct(logMs, idx_z, 12.725, 11.038, 0.466, 0.61, 1.95)
# logphi = log_phi_direct(logMs, idx_z, 12.725, 11.038, 0.466, 0.61, 0.7) # fits better with smaller gamma
# plt.plot(logMs, logphi)
# logphi = log_phi_true(logMs, idx_z, M1, Ms0, beta, delta, gamma, ksi)
# logmhalo = logMh(logMs, M1, Ms0, beta, delta, gamma)
# plt.plot(logMs, logmhalo)
"""Good fit by eye for the idx_z=1, no_ksi
starting_point = ([12.7, 11.1, 0.5, 0.3, 1.2])
"""
# chi2_noksi(0, 12.7, 8.9, 0.3, 0.6, 2.5)
# theta = np.array([12.7, 8.9, 0.3, 0.6, 2.5])
# theta = np.array([ 11.73672883, 10.63457168 , 0.55492575 , 0.45137568 , 2.58689832])
# plt.plot(hmf_bolshoi[0][:,0], hmf_bolshoi[0][:,2])
# thetavar = np.array([np.linspace(10, 14, num=100), np.full(100, 11), np.full(100,0.5),
# np.full(100,0.5), np.full(100,2.5), np.full(100,0.15)])
# neglog = np.zeros(100)
# idx_z = 0
# for i in range(100):
# neglog[i] = negloglike(thetavar[:,i], idx_z)
# plt.plot(neglog)
# for i in range(ndim):
# plt.figure()
# for j in range(nwalker):
# plt.plot(chain[j, :, i], '.')
# plt.show()
# """Test emcee sampling"""
# nwalker=250
# ndim=6
# std = np.array([1, 1, 0.1, 0.1, 0.1, 0.1])
# p0 = emcee.utils.sample_ball(theta0, std, size=nwalker)
# sampler= emcee.EnsembleSampler(nwalker, ndim, negloglike, args=[idx_z])
# pos, prob, state = sampler.run_mcmc(p0, 100) ## burn phase
# sampler.run_mcmc(pos, 1000) ## samble phase
"""Select the chains that converged"""
# chain = np.load("../MCMC_Candels/Chain/Chain_noksi_z" + str(idx_z) + "_niter=" + str(iterations) + ".npy")
# lnprob = np.load("../MCMC_Candels/Chain/LnProb_noksi_z" + str(idx_z) + "_niter=" + str(iterations) + ".npy")
# for k in range(20):
# plt.plot(lnprob[k, :])
# select = np.where(lnprob[:, -1]>-30)
# chain = chain[lnprob[:, -1]>-30, :, :] | gpl-3.0 |
TMiguelT/PandasSchema | pandas_schema/column.py | 1 | 1246 | import typing
import pandas as pd
from . import validation
from .validation_warning import ValidationWarning
class Column:
def __init__(self, name: str, validations: typing.Iterable['validation._BaseValidation'] = [], allow_empty=False):
"""
Creates a new Column object
:param name: The column header that defines this column. This must be identical to the header used in the CSV/Data Frame you are validating.
:param validations: An iterable of objects implementing _BaseValidation that will generate ValidationErrors
:param allow_empty: True if an empty column is considered valid. False if we leave that logic up to the Validation
"""
self.name = name
self.validations = list(validations)
self.allow_empty = allow_empty
def validate(self, series: pd.Series) -> typing.List[ValidationWarning]:
"""
Creates a list of validation errors using the Validation objects contained in the Column
:param series: A pandas Series to validate
        :return: An iterable of ValidationWarning instances generated by the validation
"""
return [error for validation in self.validations for error in validation.get_errors(series, self)]
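# Usage sketch (illustrative only; assumes InRangeValidation exists in this
# package's validation module with an (allowed min, allowed max) signature):
#
#     from pandas_schema.validation import InRangeValidation
#     age = Column('age', [InRangeValidation(0, 120)])
#     warnings = age.validate(pd.Series([25, 150]))  # 150 is out of range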
| gpl-3.0 |
dimkal/mne-python | mne/decoding/ems.py | 16 | 4347 | # Author: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from ..utils import logger, verbose
from ..fixes import Counter
from ..parallel import parallel_func
from .. import pick_types, pick_info
@verbose
def compute_ems(epochs, conditions=None, picks=None, n_jobs=1, verbose=None):
"""Compute event-matched spatial filter on epochs
This version operates on the entire time course. No time window needs to
be specified. The result is a spatial filter at each time point and a
corresponding time course. Intuitively, the result gives the similarity
between the filter at each time point and the data vector (sensors) at
that time point.
References
----------
[1] Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing
multi-sensor data to a single time course that reveals experimental
effects", BMC Neuroscience 2013, 14:122
Parameters
----------
epochs : instance of mne.Epochs
The epochs.
conditions : list of str | None
If a list of strings, strings must match the
epochs.event_id's key as well as the number of conditions supported
        by the objective_function. If None, keys in epochs.event_id are used.
picks : array-like of int | None
Channels to be included. If None only good data channels are used.
Defaults to None
n_jobs : int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
    surrogate_trials : ndarray, shape (n_trials, n_time_points)
The trial surrogates.
mean_spatial_filter : ndarray, shape (n_channels, n_times)
The set of spatial filters.
conditions : ndarray, shape (n_epochs,)
The conditions used. Values correspond to original event ids.
"""
logger.info('...computing surrogate time series. This can take some time')
if picks is None:
picks = pick_types(epochs.info, meg=True, eeg=True)
if not len(set(Counter(epochs.events[:, 2]).values())) == 1:
raise ValueError('The same number of epochs is required by '
'this function. Please consider '
'`epochs.equalize_event_counts`')
if conditions is None:
conditions = epochs.event_id.keys()
epochs = epochs.copy()
else:
epochs = epochs[conditions]
epochs.drop_bad_epochs()
if len(conditions) != 2:
raise ValueError('Currently this function expects exactly 2 '
'conditions but you gave me %i' %
len(conditions))
ev = epochs.events[:, 2]
# special care to avoid path dependant mappings and orders
conditions = list(sorted(conditions))
cond_idx = [np.where(ev == epochs.event_id[k])[0] for k in conditions]
info = pick_info(epochs.info, picks)
data = epochs.get_data()[:, picks]
# Scale (z-score) the data by channel type
for ch_type in ['mag', 'grad', 'eeg']:
if ch_type in epochs:
if ch_type == 'eeg':
this_picks = pick_types(info, meg=False, eeg=True)
else:
this_picks = pick_types(info, meg=ch_type, eeg=False)
data[:, this_picks] /= np.std(data[:, this_picks])
from sklearn.cross_validation import LeaveOneOut
parallel, p_func, _ = parallel_func(_run_ems, n_jobs=n_jobs)
out = parallel(p_func(_ems_diff, data, cond_idx, train, test)
for train, test in LeaveOneOut(len(data)))
surrogate_trials, spatial_filter = zip(*out)
surrogate_trials = np.array(surrogate_trials)
spatial_filter = np.mean(spatial_filter, axis=0)
return surrogate_trials, spatial_filter, epochs.events[:, 2]
def _ems_diff(data0, data1):
"""default diff objective function"""
return np.mean(data0, axis=0) - np.mean(data1, axis=0)
def _run_ems(objective_function, data, cond_idx, train, test):
d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx))
d /= np.sqrt(np.sum(d ** 2, axis=0))[None, :]
# compute surrogates
return np.sum(data[test[0]] * d, axis=0), d
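# Usage sketch for compute_ems (illustrative only; the condition names are
# made up, and the Epochs object must contain exactly two conditions with
# equalized event counts):
#
#     surrogates, spatial_filter, event_ids = compute_ems(
#         epochs, conditions=['aud_l', 'vis_l'])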
| bsd-3-clause |
gyglim/Recipes | papers/preactivation_and_wide_resnet/train_nn.py | 3 | 5152 | """
Lasagne implementation of CIFAR-10 examples from "Identity Mappings in Deep Residual Networks" (https://arxiv.org/abs/1603.05027) and "Wide Residual Networks" (https://arxiv.org/abs/1605.07146)
"""
import sys
import gzip
import time
import pickle
import numpy as np
import theano
from theano import tensor as T
import lasagne
from lasagne.updates import nesterov_momentum, adam
from lasagne.layers import helper
from utils import load_pickle_data_cv, batch_iterator_valid, batch_iterator_train_crop_flip
from matplotlib import pyplot
variant = sys.argv[1] if len(sys.argv) > 1 else 'normal'
depth = int(sys.argv[2]) if len(sys.argv) > 2 else 18
width = int(sys.argv[3]) if len(sys.argv) > 3 else 1
print 'Using %s ResNet with depth %d and width %d.'%(variant,depth,width)
if variant == 'normal':
from models import ResNet_FullPreActivation as ResNet
elif variant == 'bottleneck':
from models import ResNet_BottleNeck_FullPreActivation as ResNet
elif variant == 'wide':
from models import ResNet_FullPre_Wide as ResNet
else:
    print 'Unsupported model %s'%variant
    sys.exit(1)
# training params
ITERS = 200
BATCHSIZE = 64
LR_SCHEDULE = {
0: 0.01,
10: 0.1,
80: 0.01,
120: 0.001,
}
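# LR_SCHEDULE maps epoch index -> learning rate; the shared variable l_r
# defined below is reset to the scheduled value whenever the current epoch
# number appears as a key (see the training loop further down).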
"""
Set up all theano functions
"""
X = T.tensor4('X')
Y = T.ivector('y')
# set up theano functions to generate output by feeding data through network, any test outputs should be deterministic
# load model
if width > 1:
output_layer = ResNet(X, n=depth, k=width)
else:
output_layer = ResNet(X, n=depth)
# create outputs
output_train = lasagne.layers.get_output(output_layer)
output_test = lasagne.layers.get_output(output_layer, deterministic=True)
# set up the loss that we aim to minimize when using cat cross entropy our Y should be ints not one-hot
loss = lasagne.objectives.categorical_crossentropy(output_train, Y)
loss = loss.mean()
# if using ResNet use L2 regularization
all_layers = lasagne.layers.get_all_layers(output_layer)
l2_penalty = lasagne.regularization.regularize_layer_params(all_layers, lasagne.regularization.l2) * 0.0001
loss = loss + l2_penalty
# set up loss functions for validation dataset
test_loss = lasagne.objectives.categorical_crossentropy(output_test, Y)
test_loss = test_loss.mean()
test_acc = T.mean(T.eq(T.argmax(output_test, axis=1), Y), dtype=theano.config.floatX)
# get parameters from network and set up sgd with nesterov momentum to update parameters, l_r is shared var so it can be changed
l_r = theano.shared(np.array(LR_SCHEDULE[0], dtype=theano.config.floatX))
params = lasagne.layers.get_all_params(output_layer, trainable=True)
updates = nesterov_momentum(loss, params, learning_rate=l_r, momentum=0.9)
#updates = adam(loss, params, learning_rate=l_r)
# set up training and prediction functions
train_fn = theano.function(inputs=[X,Y], outputs=loss, updates=updates)
valid_fn = theano.function(inputs=[X,Y], outputs=[test_loss, test_acc])
'''
load training data and start training
'''
# load the training and validation data sets
train_X, test_X, train_y, test_y = load_pickle_data_cv()
print 'Train shape:', train_X.shape, 'Test shape:', test_X.shape
print 'Train y shape:', train_y.shape, 'Test y shape:', test_y.shape
print np.amax(train_X), np.amin(train_X), np.mean(train_X)
# loop over training functions for however many iterations, print information while training
train_eval = []
valid_eval = []
valid_acc = []
best_acc = 0.0
try:
for epoch in range(ITERS):
# change learning rate according to schedules
if epoch in LR_SCHEDULE:
l_r.set_value(LR_SCHEDULE[epoch])
# do the training
start = time.time()
train_loss = batch_iterator_train_crop_flip(train_X, train_y, BATCHSIZE, train_fn)
train_eval.append(train_loss)
valid_loss, acc_v = batch_iterator_valid(test_X, test_y, BATCHSIZE, valid_fn)
valid_eval.append(valid_loss)
valid_acc.append(1.0 - acc_v)
ratio = train_loss / valid_loss
end = time.time() - start
# print training details
print 'iter:', epoch, '| TL:', np.round(train_loss,decimals=3), '| VL:', np.round(valid_loss,decimals=3), '| Vacc:', np.round(acc_v,decimals=3), '| Ratio:', np.round(ratio,decimals=2), '| Time:', np.round(end,decimals=1)
if acc_v > best_acc:
best_acc = acc_v
best_params = helper.get_all_param_values(output_layer)
except KeyboardInterrupt:
pass
print "Final Acc:", best_acc
# save weights
all_params = helper.get_all_param_values(output_layer)
f = gzip.open('data/weights/%s%d_resnet.pklz'%(variant,depth), 'wb')
pickle.dump(best_params, f)
f.close()
# plot loss and accuracy
train_eval = np.array(train_eval)
valid_acc = np.array(valid_acc)
pyplot.plot(train_eval, label='Train loss', color='#707070')
pyplot.ylabel('Categorical Cross Entropy Loss')
pyplot.xlabel('Epoch')
pyplot.legend(loc=2)
#pyplot.ylim([0,1.5])
pyplot.twinx()
pyplot.ylabel('Valid Acc Error (%)')
pyplot.grid()
pyplot.plot(valid_acc, label='Valid classification error (%)', color='#ED5724')
pyplot.legend(loc=1)
pyplot.savefig('plots/%s%d_resnet.png'%(variant,depth))
pyplot.clf()
#pyplot.show()
| mit |
kadubarbosa/aga503 | ep4.py | 1 | 2764 | # -*- coding: utf-8 -*-
"""
Created on 05/11/15
@author: Carlos Eduardo Barbosa
Solution to exercise 4
"""
import os
import numpy as np
from scipy.optimize import leastsq
import matplotlib.pyplot as plt
def planck(wave, temp):
""" Returns the Planck's Law intensities.
Wavelengths are in Angstroms
Temperatures in K.
Output is converted to ergs/cm2/s/A to reproduce IDL version."""
k = 1.380658e-16 # Boltzmann constant in erg / K
    h = 6.6260755e-27 # Planck constant in erg s
c = 2.99792458e10 # Speed of light in cm / s
w = wave * 1.e-8 # Convertion to cm
B = 2 * h * c * c * np.power(w, -5) / (np.exp(h * c / (w * k * temp)) - 1.)
# Returns Planck's Law in units of ergs/cm2/s/A
return np.pi * B * 1e-8
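# Hypothetical sanity check (illustration only, not in the original script):
# planck(np.array([5000.0]), 5778.0) returns the blackbody flux in
# ergs/cm2/s/A at 5000 Angstrom for a Sun-like temperature, and integrating
# planck(wave, T) over wavelength (in Angstrom) should approach sigma * T**4.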
def model(wave, T1, T2):
return planck(wave, T1) / (sigma * T1 * T1 * T1 * T1) + \
planck(wave, T2) / (sigma * T2 * T2 * T2 * T2)
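# The model above adds two blackbody spectra, each normalised by its
# bolometric flux sigma*T**4, so the fit is sensitive to the spectral shapes
# of the two temperature components rather than their absolute fluxes.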
def chi2(wave, flux, err, T1, T2):
    """ Return the reduced chi-squared map. """
chi2map = np.zeros_like(T1)
for w,f,e in zip(wave,flux,err):
chi2map += np.power((f - model(w, T1, T2)) / e, 2.)
return chi2map / (len(wave) - 1)
if __name__ == "__main__":
## Physical constants ####################################################
k = 1.380658e-16 # Boltzmann constant in erg / K
    h = 6.6260755e-27 # Planck constant in erg s
c = 2.99792458e10 # Speed of light in cm / s
sigma = 5.670367e-5 # Stefan-Boltzman constant in erg / cm2 / s / K4
##########################################################################
table = "/home/kadu/Dropbox/aga503/ep4/dados_vinicius.txt"
wave, flux, err = np.loadtxt(table).T
##########################################################################
# fig1 = plt.figure(1)
# ax1 = plt.subplot(211)
# ax1.errorbar(wave, flux, yerr=err)
# ax1.minorticks_on()
# plt.pause(0.001)
##########################################################################
t1 = np.linspace(3000, 6000., 100)
t2 = np.linspace(700, 1000., 100)
# t1 = np.linspace(5100, 5150., 100)
# t2 = np.linspace(100, 1000., 100)
T1, T2 = np.meshgrid(t1,t2)
# fig3 = plt.figure(3)
# plt.imshow(np.sqrt(T1**2 + T2**2), origin="bottom")
# plt.colorbar()
fig2 = plt.figure(2)
plt.minorticks_on()
map = chi2(wave, flux, err, T1, T2)
print map.min(), map.max()
# map = np.clip(map, map.min(), 10* map.min())
    idx = np.where(map == map.min())[0]
plt.imshow(map, origin="bottom", extent=[t1[0], t1[-1], t2[0], t2[-1]],
aspect="auto", vmin=1, vmax=5)
plt.xlabel(r"$T_1$ [K]")
plt.ylabel(r"$T_2$ [K]")
plt.colorbar(label=r"$\chi^2_\nu$")
plt.tight_layout()
plt.savefig("chi2.png")
plt.show(block=1)
| gpl-2.0 |
davisvideochallenge/davis2017-evaluation | davis2017/davis.py | 1 | 5514 | import os
from glob import glob
from collections import defaultdict
import numpy as np
from PIL import Image
class DAVIS(object):
SUBSET_OPTIONS = ['train', 'val', 'test-dev', 'test-challenge']
TASKS = ['semi-supervised', 'unsupervised']
DATASET_WEB = 'https://davischallenge.org/davis2017/code.html'
VOID_LABEL = 255
def __init__(self, root, task='unsupervised', subset='val', sequences='all', resolution='480p', codalab=False):
"""
Class to read the DAVIS dataset
:param root: Path to the DAVIS folder that contains JPEGImages, Annotations, etc. folders.
:param task: Task to load the annotations, choose between semi-supervised or unsupervised.
:param subset: Set to load the annotations
:param sequences: Sequences to consider, 'all' to use all the sequences in a set.
        :param resolution: Specify the resolution to use for the dataset, choose between '480p' and 'Full-Resolution'
"""
if subset not in self.SUBSET_OPTIONS:
raise ValueError(f'Subset should be in {self.SUBSET_OPTIONS}')
if task not in self.TASKS:
raise ValueError(f'The only tasks that are supported are {self.TASKS}')
self.task = task
self.subset = subset
self.root = root
self.img_path = os.path.join(self.root, 'JPEGImages', resolution)
annotations_folder = 'Annotations' if task == 'semi-supervised' else 'Annotations_unsupervised'
self.mask_path = os.path.join(self.root, annotations_folder, resolution)
year = '2019' if task == 'unsupervised' and (subset == 'test-dev' or subset == 'test-challenge') else '2017'
self.imagesets_path = os.path.join(self.root, 'ImageSets', year)
self._check_directories()
if sequences == 'all':
with open(os.path.join(self.imagesets_path, f'{self.subset}.txt'), 'r') as f:
tmp = f.readlines()
sequences_names = [x.strip() for x in tmp]
else:
sequences_names = sequences if isinstance(sequences, list) else [sequences]
self.sequences = defaultdict(dict)
for seq in sequences_names:
images = np.sort(glob(os.path.join(self.img_path, seq, '*.jpg'))).tolist()
if len(images) == 0 and not codalab:
raise FileNotFoundError(f'Images for sequence {seq} not found.')
self.sequences[seq]['images'] = images
masks = np.sort(glob(os.path.join(self.mask_path, seq, '*.png'))).tolist()
masks.extend([-1] * (len(images) - len(masks)))
self.sequences[seq]['masks'] = masks
def _check_directories(self):
if not os.path.exists(self.root):
raise FileNotFoundError(f'DAVIS not found in the specified directory, download it from {self.DATASET_WEB}')
if not os.path.exists(os.path.join(self.imagesets_path, f'{self.subset}.txt')):
raise FileNotFoundError(f'Subset sequences list for {self.subset} not found, download the missing subset '
f'for the {self.task} task from {self.DATASET_WEB}')
if self.subset in ['train', 'val'] and not os.path.exists(self.mask_path):
raise FileNotFoundError(f'Annotations folder for the {self.task} task not found, download it from {self.DATASET_WEB}')
def get_frames(self, sequence):
for img, msk in zip(self.sequences[sequence]['images'], self.sequences[sequence]['masks']):
image = np.array(Image.open(img))
mask = None if msk is None else np.array(Image.open(msk))
yield image, mask
def _get_all_elements(self, sequence, obj_type):
obj = np.array(Image.open(self.sequences[sequence][obj_type][0]))
all_objs = np.zeros((len(self.sequences[sequence][obj_type]), *obj.shape))
obj_id = []
for i, obj in enumerate(self.sequences[sequence][obj_type]):
all_objs[i, ...] = np.array(Image.open(obj))
obj_id.append(''.join(obj.split('/')[-1].split('.')[:-1]))
return all_objs, obj_id
def get_all_images(self, sequence):
return self._get_all_elements(sequence, 'images')
def get_all_masks(self, sequence, separate_objects_masks=False):
masks, masks_id = self._get_all_elements(sequence, 'masks')
masks_void = np.zeros_like(masks)
# Separate void and object masks
for i in range(masks.shape[0]):
masks_void[i, ...] = masks[i, ...] == 255
masks[i, masks[i, ...] == 255] = 0
if separate_objects_masks:
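            # Split the single label image per frame into one boolean mask per
            # object id: broadcasting ids 1..num_objects against the label
            # volume gives an array of shape (num_objects, n_frames, H, W).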
num_objects = int(np.max(masks[0, ...]))
tmp = np.ones((num_objects, *masks.shape))
tmp = tmp * np.arange(1, num_objects + 1)[:, None, None, None]
masks = (tmp == masks[None, ...])
masks = masks > 0
return masks, masks_void, masks_id
def get_sequences(self):
for seq in self.sequences:
yield seq
if __name__ == '__main__':
from matplotlib import pyplot as plt
only_first_frame = True
subsets = ['train', 'val']
for s in subsets:
dataset = DAVIS(root='/home/csergi/scratch2/Databases/DAVIS2017_private', subset=s)
for seq in dataset.get_sequences():
g = dataset.get_frames(seq)
img, mask = next(g)
plt.subplot(2, 1, 1)
plt.title(seq)
plt.imshow(img)
plt.subplot(2, 1, 2)
plt.imshow(mask)
plt.show(block=True)
| bsd-3-clause |
effigies/PySurfer | setup.py | 4 | 3134 | #! /usr/bin/env python
#
# Copyright (C) 2011-2014 Alexandre Gramfort
# Michael Waskom
# Scott Burns
# Martin Luessi
# Eric Larson
descr = """PySurfer: cortical surface visualization using Python."""
import os
# deal with MPL sandbox violations during easy_install
os.environ['MPLCONFIGDIR'] = '.'
# get the version, don't import surfer here so setup works on headless systems
version = None
with open(os.path.join('surfer', '__init__.py'), 'r') as fid:
for line in (line.strip() for line in fid):
if line.startswith('__version__'):
version = line.split('=')[1].strip().strip('"')
break
if version is None:
raise RuntimeError('Could not determine version')
DISTNAME = 'pysurfer'
DESCRIPTION = descr
LONG_DESCRIPTION = descr
MAINTAINER = 'Michael Waskom'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://pysurfer.github.com'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'https://github.com/nipy/PySurfer'
VERSION = version
def check_dependencies():
needed_deps = ["IPython",
"numpy", "scipy", "matplotlib",
"mayavi",
]
missing_deps = []
for dep in needed_deps:
try:
__import__(dep)
except ImportError:
missing_deps.append(dep)
if missing_deps:
missing = ", ".join(missing_deps)
raise ImportError("Missing dependencies: %s" % missing)
from setuptools import setup
if __name__ == "__main__":
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
import sys
if not (len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'--version',
'egg_info',
'clean'))):
check_dependencies()
setup(name=DISTNAME,
maintainer=MAINTAINER,
include_package_data=True,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
zip_safe=False, # the package can run out of an .egg file
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'License :: OSI Approved',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'
],
platforms='any',
packages=['surfer', 'surfer.tests'],
scripts=['bin/pysurfer'],
install_requires=['nibabel >= 1.2'],
)
| bsd-3-clause |
othercriteria/StochasticBlockmodel | test_c_elegans.py | 1 | 9744 | #!/usr/bin/env python
# Application of network inference to C. elegans connectome
# Daniel Klein, 4/10/2013
import json
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import xlrd
from Network import network_from_edges
from Models import StationaryLogistic, NonstationaryLogistic
from Models import FixedMargins, alpha_zero
# Parameters
params = { 'use_gap': False,
'use_chemical': True,
'cov_gap': False,
'cov_chemical': False,
'cov_soma_diff': False,
'cov_soma_dist': True,
'cov_soma_dir': False,
'cov_lineage': False,
'cov_class': False,
'file_network': 'data/c_elegans_chen/NeuronConnect.xls',
'file_neurons': 'data/c_elegans_chen/NeuronType.xls',
'file_landmarks': 'data/c_elegans_chen/NeuronFixedPoints.xls',
'file_lineage_1': 'data/c_elegans_chen/NeuronLineage_Part1.xls',
'file_lineage_2': 'data/c_elegans_chen/NeuronLineage_Part2.xls',
'n_samples': 100,
'n_bootstrap': 100,
'outfile': 'out.pdf' }
# Import network connectivity from file
edges = []
edges_gap = set()
edges_chemical = set()
nodes = set()
wb_network = xlrd.open_workbook(params['file_network'])
ws_network = wb_network.sheet_by_name('NeuronConnect.csv')
for row in range(1, ws_network.nrows):
n_1 = ws_network.cell_value(row, 0)
n_2 = ws_network.cell_value(row, 1)
t = ws_network.cell_value(row, 2)[0]
if n_1 == 'NMJ' or n_2 == 'NMJ': continue
if t == 'E':
edges_gap.add((n_1, n_2))
edges_gap.add((n_2, n_1))
if params['use_gap']:
edges.append((n_1, n_2))
edges.append((n_2, n_1))
nodes.add(n_1)
nodes.add(n_2)
if t == 'S':
edges_chemical.add((n_1, n_2))
if params['use_chemical']:
edges.append((n_1, n_2))
nodes.add(n_1)
nodes.add(n_2)
print '# Nodes: %d' % len(nodes)
# Initialize network from connectivity data
net = network_from_edges(edges)
net.initialize_offset()
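# A large negative offset on the diagonal effectively forbids self-loops
# under the logistic link (presumably the intent of the hard-coded -10.0).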
for i in range(net.N):
net.offset[i,i] = -10.0
A = net.as_dense()
r = A.sum(1)
c = A.sum(0)
print '# Edges: %d' % A.sum()
cov_names = []
def add_cov_f(name, f):
cov_names.append(name)
net.new_edge_covariate(name).from_binary_function_name(f)
# Covariates from other synapse type
if params['cov_gap']:
def f_gap(n_1, n_2):
return (n_1, n_2) in edges_gap
add_cov_f('gap', f_gap)
if params['cov_chemical']:
def f_chemical(n_1, n_2):
return (n_1, n_2) in edges_chemical
add_cov_f('chemical', f_chemical)
# Import soma position from file
soma_pos = {}
wb_neurons = xlrd.open_workbook(params['file_neurons'])
ws_neurons = wb_neurons.sheet_by_name('NeuronType.csv')
for row in range(1, ws_neurons.nrows):
n = ws_neurons.cell_value(row, 0)
pos = ws_neurons.cell_value(row, 1)
soma_pos[n] = pos
cov = net.new_node_covariate('soma_pos')
cov.from_pairs(soma_pos.keys(), soma_pos.values())
if params['cov_soma_diff']:
def f_soma_pos_diff(n_1, n_2):
return (soma_pos[n_1] - soma_pos[n_2])
    add_cov_f('soma_diff', f_soma_pos_diff)
if params['cov_soma_dist']:
def f_soma_pos_dist(n_1, n_2):
return abs(soma_pos[n_1] - soma_pos[n_2])
add_cov_f('soma_dist', f_soma_pos_dist)
if params['cov_soma_dir']:
def f_soma_pos_dir(n_1, n_2):
return soma_pos[n_2] > soma_pos[n_1]
add_cov_f('soma_dir', f_soma_pos_dir)
# Import landmark type (hence sensory, motor, inter-) from file
neuron_class = {}
wb_landmarks = xlrd.open_workbook(params['file_landmarks'])
ws_landmarks = wb_landmarks.sheet_by_name('NeuronFixedPoints.csv')
for row in range(1, ws_landmarks.nrows):
n = ws_landmarks.cell_value(row, 0)
t = ws_landmarks.cell_value(row, 1)[0]
neuron_class[n] = t
for n in net.names:
if not n in neuron_class:
neuron_class[n] = 'I'
if params['cov_class']:
for class_1 in ['S', 'I', 'M']:
for class_2 in ['S', 'I', 'M']:
if class_1 == 'I' and class_2 == 'I': continue
def f_same_class(n_1, n_2):
return ((neuron_class[n_1] == class_1) and
(neuron_class[n_2] == class_2))
add_cov_f('%s_%s' % (class_1, class_2), f_same_class)
# Import lineage distance from files
dist = {}
wb_lineage_1 = xlrd.open_workbook(params['file_lineage_1'])
ws_lineage_1 = wb_lineage_1.sheet_by_name('NeuronLineage_Part1.csv')
for row in range(1, ws_lineage_1.nrows):
n_1 = ws_lineage_1.cell_value(row, 0)
n_2 = ws_lineage_1.cell_value(row, 1)
d = ws_lineage_1.cell_value(row, 2)
dist[(n_1,n_2)] = d
dist[(n_2,n_1)] = d
wb_lineage_2 = xlrd.open_workbook(params['file_lineage_2'])
ws_lineage_2 = wb_lineage_2.sheet_by_name('NeuronLineage_Part2.csv')
for row in range(1, ws_lineage_2.nrows):
n_1 = ws_lineage_2.cell_value(row, 0)
n_2 = ws_lineage_2.cell_value(row, 1)
d = ws_lineage_2.cell_value(row, 2)
dist[(n_1,n_2)] = d
dist[(n_2,n_1)] = d
if params['cov_lineage']:
def f_lineage_dist(n_1, n_2):
if n_1 == n_2: return 0
return dist[(n_1, n_2)]
add_cov_f('lineage_dist', f_lineage_dist)
# Functions for plotting
def style(ax):
ax.tick_params(axis = 'both', which = 'both',
bottom = 'off', top = 'off', labelbottom = 'off',
left = 'off', right = 'off', labelleft = 'off')
def heatmap(ax, data):
ax.imshow(data[o][:,o]).set_cmap('binary')
style(ax)
def residuals(ax, data_mean, data_sd):
resid = np.abs((data_mean - A) / data_sd)
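    # Cells whose sampled standard deviation is zero get residual 0 when the
    # sample mean matches the observed entry and a large sentinel (10.0)
    # otherwise, so the division above never leaves NaN or inf in the image.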
resid[(data_sd == 0) * (data_mean == A)] = 0.0
resid[(data_sd == 0) * (data_mean != A)] = 10.0
ax.imshow(resid[o][:,o], vmin = 0, vmax = 3.0).set_cmap('gray')
style(ax)
# Display observed network
o = np.argsort(net.node_covariates['soma_pos'][:])
A = net.as_dense()
fig, ax = plt.subplots()
ax.set_title('Observed connectome')
heatmap(ax, A)
plt.show()
fig, ax = plt.subplots()
ax.set_title('Observed connectome network')
graph = nx.DiGraph()
for n1, n2 in edges:
graph.add_edge(n1, n2)
pos = nx.nx_pydot.graphviz_layout(graph, prog = 'fdp')
nx.draw(graph, pos, ax = ax, node_size = 50, with_labels = False)
plt.show()
# Store sampled typical networks from fit models
s_samples = np.empty((params['n_samples'], net.N, net.N))
ns_samples = np.empty((params['n_samples'], net.N, net.N))
c_samples = np.empty((params['n_samples'], net.N, net.N))
def display_cis(model):
procedures = set()
for par in model.conf:
for procedure in model.conf[par]:
procedures.add(procedure)
for procedure in procedures:
print '%s:' % procedure
for par in model.conf:
if not procedure in model.conf[par]: continue
ci = model.conf[par][procedure]
print ' %s: (%.2f, %.2f)' % (par, ci[0], ci[1])
print
print 'Fitting stationary model'
s_model = StationaryLogistic()
for cov_name in cov_names:
s_model.beta[cov_name] = None
s_model.fit(net)
print 'NLL: %.2f' % s_model.nll(net)
print 'kappa: %.2f' % s_model.kappa
for cov_name in cov_names:
print '%s: %.2f' % (cov_name, s_model.beta[cov_name])
print
for rep in range(params['n_samples']):
s_samples[rep,:,:] = s_model.generate(net)
s_model.confidence_boot(net, n_bootstrap = params['n_bootstrap'])
s_model.confidence_wald(net)
display_cis(s_model)
print 'Fitting conditional model'
c_model = FixedMargins(StationaryLogistic())
net.new_row_covariate('r', np.int)[:] = r
net.new_col_covariate('c', np.int)[:] = c
c_model.fit = c_model.base_model.fit_conditional
for cov_name in cov_names:
c_model.base_model.beta[cov_name] = None
c_model.fit(net, verbose = True)
print 'NLL: %.2f' % c_model.nll(net)
for cov_name in cov_names:
print '%s: %.2f' % (cov_name, c_model.base_model.beta[cov_name])
print
for rep in range(params['n_samples']):
c_samples[rep,:,:] = c_model.generate(net, coverage = 0.1)
c_model.confidence_boot(net, n_bootstrap = params['n_bootstrap'])
c_model.confidence_wald(net)
for cov_name in cov_names:
c_model.confidence_cons(net, cov_name, L = 121, test = 'score')
c_model.confidence_cons(net, cov_name, L = 121, test = 'lr')
display_cis(c_model)
# Offset extreme substructure only for Nonstationary model
net.offset_extremes()
print 'Fitting nonstationary model'
ns_model = NonstationaryLogistic()
for cov_name in cov_names:
ns_model.beta[cov_name] = None
ns_model.fit(net)
print 'NLL: %.2f' % ns_model.nll(net)
print 'kappa: %.2f' % ns_model.kappa
for cov_name in cov_names:
print '%s: %.2f' % (cov_name, ns_model.beta[cov_name])
print
for rep in range(params['n_samples']):
ns_samples[rep,:,:] = ns_model.generate(net)
ns_model.confidence_boot(net, n_bootstrap = params['n_bootstrap'])
ns_model.confidence_wald(net)
display_cis(ns_model)
# Calculate sample means and variances
s_samples_mean = np.mean(s_samples, axis = 0)
s_samples_sd = np.sqrt(np.var(s_samples, axis = 0))
ns_samples_mean = np.mean(ns_samples, axis = 0)
ns_samples_sd = np.sqrt(np.var(ns_samples, axis = 0))
c_samples_mean = np.mean(c_samples, axis = 0)
c_samples_sd = np.sqrt(np.var(c_samples, axis = 0))
# Finish plotting
plt.figure()
ax = plt.subplot(231)
ax.set_title('Stationary')
heatmap(ax, s_samples_mean)
ax = plt.subplot(234)
residuals(ax, s_samples_mean, s_samples_sd)
ax = plt.subplot(232)
ax.set_title('Nonstationary')
heatmap(ax, ns_samples_mean)
ax = plt.subplot(235)
residuals(ax, ns_samples_mean, ns_samples_sd)
ax = plt.subplot(233)
ax.set_title('Conditional')
heatmap(ax, c_samples_mean)
ax = plt.subplot(236)
residuals(ax, c_samples_mean, c_samples_sd)
plt.tight_layout()
if params['outfile']:
plt.savefig(params['outfile'])
plt.show()
| mit |
MohammedWasim/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is never
more than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by the
:class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier with two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
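# A rough, illustrative sketch of the generative process described in the
# module docstring above; it is an assumption for exposition only -- the data
# used below actually come from sklearn.datasets.make_multilabel_classification.
#
#     rng = np.random.RandomState(0)
#     n_classes, n_labels, length, n_words = 2, 1, 50, 100
#     n = rng.poisson(n_labels)
#     while n > n_classes:              # rejection step for the label count
#         n = rng.poisson(n_labels)
#     labels = rng.choice(n_classes, size=n, replace=False)
#     k = rng.poisson(length)
#     while k == 0:                     # rejection step for the document length
#         k = rng.poisson(length)
#     word_counts = rng.multinomial(k, np.ones(n_words) / n_words)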
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
0asa/scikit-learn | sklearn/tests/test_pipeline.py | 17 | 12512 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import BaseEstimator, clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(BaseEstimator):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
""" Test the various init parameters of the pipeline.
"""
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params()
params2 = pipe2.get_params()
# Remove estimators that where copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
""" Test the various methods of the pipeline (anova).
"""
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
"""Test that the pipeline can take fit parameters
"""
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_methods_pca_svm():
"""Test the various methods of the pipeline (pca + svm)."""
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
"""Test the various methods of the pipeline (preprocessing + svm)."""
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
DangoMelon0701/PyRemote-Sensing | NETCDF scripts/SODA3/anomalies.py | 1 | 5546 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 24 15:40:25 2017
@author: Gerardo A. Rivera Tello
"""
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from scipy.io import netcdf
from osgeo import gdal,osr
import numpy as np
import os
#%%
if 'GDAL_DATA' not in os.environ:
os.environ['GDAL_DATA'] = r'/home/DangoMelon0701/anaconda3/pkgs/libgdal-2.1.0-0/share/gdal'
#%%
# Function to plot data on a map
def plot_map(data,lat,lon,band=None,title=None,savefig=None,name='image'):
fig, axis = plt.subplots(figsize=(10,20))
m = Basemap(projection = 'cyl', resolution = 'l',
llcrnrlat=lat.min()-1,urcrnrlat=lat.max()+1,
llcrnrlon=lon.min()-1, urcrnrlon=lon.max()+1)
m.drawcoastlines(linewidth = 0.5)
m.drawcountries()
m.drawparallels(np.arange(-90.0,90.0,2.0), labels = [1,0,0,0])
m.drawmeridians(np.arange(-180.0,180.0,2.0), labels = [0,0,0,0],linewidth=0.5)
m.drawmeridians(np.arange(-180.0,180.0,10.0), labels = [0,0,0,1],linewidth=0.5)
x, y =m(lon, lat)
if band == None:
mmap=m.pcolormesh(x, y, data, vmin=data.min(),vmax=data.max(),cmap=plt.cm.bwr)
else:
mmap=m.pcolormesh(x, y, data[band], vmin=data.min(),vmax=data.max(),cmap=plt.cm.bwr)
cbar = m.colorbar(mmap,location='bottom',size='10%',pad='15%')
cbar.set_label('°C')
if title != None:
axis.set_title(title)
if savefig != None:
fig.savefig("{}.png".format(name),dpi=1000,bbox_inches='tight')
#%%
#This class computes the anomalies from the following set of equations
# x = x_clim + x~
# where
# x_clim = [x]+<x> ; [x] = mean and <x> = seasonality of the variable
class Anomalies(object):
def __init__(self,data_netcdf):
self.nc_file = netcdf.NetCDFFile(data_netcdf,'r')
self.bandsn = self.nc_file.dimensions['time']
self.lines = self.nc_file.dimensions['latitude']
self.samples = self.nc_file.dimensions['longitude']
self.whole_data = np.zeros([self.bandsn,self.lines,self.samples])
self.lon,self.lat = np.meshgrid(self.nc_file.variables['longitude'][:],
self.nc_file.variables['latitude'][:])
self.temp = self.nc_file.variables['temp'][:,0]
for band in range(1,self.bandsn+1):
self.whole_data[band-1]=self.temp[band-1]
def get_whole_mean(self):
data_mean = np.zeros([self.lines,self.samples])
for band in range(1,self.bandsn+1):
data_mean +=self.whole_data[band-1]
return data_mean/self.bandsn
def get_seasonality(self,mean):
season_data = np.zeros([12,self.lines,self.samples])
global_diff = np.zeros([self.bandsn,self.lines,self.samples])
for step in range(self.bandsn):
global_diff[step]=self.whole_data[step]-mean
for month_number in range(12):
for year in range(int(self.bandsn/12)):
season_data[month_number] += global_diff[12*year+month_number]
            season_data[month_number] /= int(self.bandsn/12)  # number of years averaged
return season_data
def get_climvalue(self,mean,seasonality):
return mean+seasonality
def get_anomalie(self):
anomalie = np.zeros([self.bandsn,self.lines,self.samples])
mean = self.get_whole_mean()
climval = self.get_climvalue(mean,self.get_seasonality(mean))
for year in range(1,int(self.bandsn/12)+1):
anomalie[12*(year-1):12*year]=self.whole_data[12*(year-1):12*year]-climval
return anomalie
def get_anomalie_mean(self,anomalie):
anomalie_mean = np.zeros([self.lines,self.samples])
for band in range(self.bandsn):
anomalie_mean +=anomalie[band]
return anomalie_mean/self.bandsn
def save_tiff(self,file,data,raster_out="stacked_data.tif"):
        # Create the output GeoTIFF
driver = gdal.GetDriverByName('GTiff')
outRaster = driver.Create(raster_out, self.samples, self.lines, self.bandsn, gdal.GDT_Float32)
gdal_file = gdal.Open(file)
outRaster.SetGeoTransform(gdal_file.GetGeoTransform())
for band in range(1,self.bandsn+1):
outband = outRaster.GetRasterBand(band)
outband.WriteArray(data[band-1])
outRasterSRS = osr.SpatialReference()
outRasterSRS.ImportFromEPSG(4326)
outRaster.SetProjection(outRasterSRS.ExportToWkt())
outband.FlushCache()
del outRaster, outRasterSRS, gdal_file
def end_anom(self):
self.nc_file.close()
#%%
if __name__ == '__main__':
files_list=[]
for files in os.listdir(os.getcwd()):
if files.endswith(".nc"):
files_list.append(files)
for f_netcdf in files_list:
calc = Anomalies(f_netcdf)
anom = calc.get_anomalie()
anom_mean = calc.get_anomalie_mean(anom)
calc.save_tiff(f_netcdf,anom,raster_out='ptemp_stacked.tif')
#%%
plot_map(anom_mean,calc.lat,calc.lon,
title='Anomalies Mean from Jan2000 to Dec2009 - El Nino Zone 4',
savefig=1,name='mean_anomalies')
plot_map(anom,calc.lat,calc.lon,band=0,
title='Potential Temperature Anomalie from Jan2000 - El Nino Zone 4',
savefig=1,name='anom_jan2000en4')
plot_map(anom,calc.lat,calc.lon,band=24,
title='Potential Temperature Anomalies from Jan2002 - El Nino Zone 4',
savefig=1,name='anom_jan2002en4')
        calc.end_anom()
| mit |
basnijholt/holoviews | holoviews/element/comparison.py | 2 | 26491 | """
Helper classes for comparing the equality of two HoloViews objects.
These classes are designed to integrate with unittest.TestCase (see
the tests directory) while making equality testing easily accessible
to the user.
For instance, to test if two Matrix objects are equal you can use:
Comparison.assertEqual(matrix1, matrix2)
This will raise an AssertionError if the two matrix objects are not
equal, including information regarding what exactly failed to match.
Note that this functionality could not be provided using comparison
methods on all objects as comparison operators only return Booleans and
thus would not supply any information regarding *why* two elements are
considered different.
"""
from functools import partial
import numpy as np
from unittest.util import safe_repr
from unittest import TestCase
from numpy.testing import assert_array_equal, assert_array_almost_equal
from . import * # noqa (All Elements need to support comparison)
from ..core import (Element, Empty, AdjointLayout, Overlay, Dimension,
HoloMap, Dimensioned, Layout, NdLayout, NdOverlay,
GridSpace, DynamicMap, GridMatrix, OrderedDict)
from ..core.options import Options, Cycle
from ..core.util import pd, datetime_types, dt_to_int
class ComparisonInterface(object):
"""
This class is designed to allow equality testing to work
seamlessly with unittest.TestCase as a mix-in by implementing a
compatible interface (namely the assertEqual method).
The assertEqual class method is to be overridden by an instance
method of the same name when used as a mix-in with TestCase. The
    contents of the equality_type_funcs dictionary are suitable for use
with TestCase.addTypeEqualityFunc.
"""
equality_type_funcs = {}
failureException = AssertionError
@classmethod
def simple_equality(cls, first, second, msg=None):
"""
Classmethod equivalent to unittest.TestCase method (longMessage = False.)
"""
if not first==second:
standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
raise cls.failureException(msg or standardMsg)
@classmethod
def assertEqual(cls, first, second, msg=None):
"""
Classmethod equivalent to unittest.TestCase method
"""
asserter = None
if type(first) is type(second):
asserter = cls.equality_type_funcs.get(type(first))
try: basestring = basestring # Python 2
except NameError: basestring = str # Python 3
if asserter is not None:
if isinstance(asserter, basestring):
asserter = getattr(cls, asserter)
if asserter is None:
asserter = cls.simple_equality
if msg is None:
asserter(first, second)
else:
asserter(first, second, msg=msg)
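# A hedged sketch (not part of the original module) of how the interface is
# meant to be mixed into unittest.TestCase, registering the per-type
# comparators via addTypeEqualityFunc; the class name below is an
# illustrative assumption:
#
#     class ComparisonTestCase(ComparisonInterface, TestCase):
#         def __init__(self, *args, **kwargs):
#             TestCase.__init__(self, *args, **kwargs)
#             for type_, fn in Comparison.register().items():
#                 self.addTypeEqualityFunc(type_, fn)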
class Comparison(ComparisonInterface):
"""
Class used for comparing two HoloViews objects, including complex
composite objects. Comparisons are available as classmethods, the
most general being the assertEqual method that is intended to work
with any input.
For instance, to test if two Image objects are equal you can use:
Comparison.assertEqual(matrix1, matrix2)
"""
# someone might prefer to use a different function, e.g. assert_all_close
assert_array_almost_equal_fn = partial(assert_array_almost_equal, decimal=6)
@classmethod
def register(cls):
# Float comparisons
cls.equality_type_funcs[float] = cls.compare_floats
cls.equality_type_funcs[np.float] = cls.compare_floats
cls.equality_type_funcs[np.float32] = cls.compare_floats
cls.equality_type_funcs[np.float64] = cls.compare_floats
# List and tuple comparisons
cls.equality_type_funcs[list] = cls.compare_lists
cls.equality_type_funcs[tuple] = cls.compare_tuples
# Dictionary comparisons
cls.equality_type_funcs[dict] = cls.compare_dictionaries
cls.equality_type_funcs[OrderedDict] = cls.compare_dictionaries
# Numpy array comparison
cls.equality_type_funcs[np.ndarray] = cls.compare_arrays
cls.equality_type_funcs[np.ma.masked_array] = cls.compare_arrays
# Pandas dataframe comparison
if pd:
cls.equality_type_funcs[pd.DataFrame] = cls.compare_dataframe
# Dimension objects
cls.equality_type_funcs[Dimension] = cls.compare_dimensions
cls.equality_type_funcs[Dimensioned] = cls.compare_dimensioned # Used in unit tests
cls.equality_type_funcs[Element] = cls.compare_elements # Used in unit tests
# Composition (+ and *)
cls.equality_type_funcs[Overlay] = cls.compare_overlays
cls.equality_type_funcs[Layout] = cls.compare_layouttrees
cls.equality_type_funcs[Empty] = cls.compare_empties
# Annotations
cls.equality_type_funcs[VLine] = cls.compare_vline
cls.equality_type_funcs[HLine] = cls.compare_hline
cls.equality_type_funcs[Spline] = cls.compare_spline
cls.equality_type_funcs[Arrow] = cls.compare_arrow
cls.equality_type_funcs[Text] = cls.compare_text
cls.equality_type_funcs[Div] = cls.compare_div
# Path comparisons
cls.equality_type_funcs[Path] = cls.compare_paths
cls.equality_type_funcs[Contours] = cls.compare_contours
cls.equality_type_funcs[Polygons] = cls.compare_polygons
cls.equality_type_funcs[Box] = cls.compare_box
cls.equality_type_funcs[Ellipse] = cls.compare_ellipse
cls.equality_type_funcs[Bounds] = cls.compare_bounds
# Rasters
cls.equality_type_funcs[Image] = cls.compare_image
cls.equality_type_funcs[RGB] = cls.compare_rgb
cls.equality_type_funcs[HSV] = cls.compare_hsv
cls.equality_type_funcs[Raster] = cls.compare_raster
cls.equality_type_funcs[QuadMesh] = cls.compare_quadmesh
cls.equality_type_funcs[Surface] = cls.compare_surface
cls.equality_type_funcs[HeatMap] = cls.compare_dataset
# Charts
cls.equality_type_funcs[Dataset] = cls.compare_dataset
cls.equality_type_funcs[Curve] = cls.compare_curve
cls.equality_type_funcs[ErrorBars] = cls.compare_errorbars
cls.equality_type_funcs[Spread] = cls.compare_spread
cls.equality_type_funcs[Area] = cls.compare_area
cls.equality_type_funcs[Scatter] = cls.compare_scatter
cls.equality_type_funcs[Scatter3D] = cls.compare_scatter3d
cls.equality_type_funcs[TriSurface] = cls.compare_trisurface
cls.equality_type_funcs[Trisurface] = cls.compare_trisurface
cls.equality_type_funcs[Histogram] = cls.compare_histogram
cls.equality_type_funcs[Bars] = cls.compare_bars
cls.equality_type_funcs[Spikes] = cls.compare_spikes
cls.equality_type_funcs[BoxWhisker] = cls.compare_boxwhisker
cls.equality_type_funcs[VectorField] = cls.compare_vectorfield
# Graphs
cls.equality_type_funcs[Graph] = cls.compare_graph
cls.equality_type_funcs[Nodes] = cls.compare_nodes
cls.equality_type_funcs[EdgePaths] = cls.compare_edgepaths
cls.equality_type_funcs[TriMesh] = cls.compare_trimesh
# Tables
cls.equality_type_funcs[ItemTable] = cls.compare_itemtables
cls.equality_type_funcs[Table] = cls.compare_tables
cls.equality_type_funcs[Points] = cls.compare_points
# Statistical
cls.equality_type_funcs[Bivariate] = cls.compare_bivariate
cls.equality_type_funcs[Distribution] = cls.compare_distribution
cls.equality_type_funcs[HexTiles] = cls.compare_hextiles
# NdMappings
cls.equality_type_funcs[NdLayout] = cls.compare_gridlayout
cls.equality_type_funcs[AdjointLayout] = cls.compare_adjointlayouts
cls.equality_type_funcs[NdOverlay] = cls.compare_ndoverlays
cls.equality_type_funcs[GridSpace] = cls.compare_grids
cls.equality_type_funcs[GridMatrix] = cls.compare_grids
cls.equality_type_funcs[HoloMap] = cls.compare_holomap
cls.equality_type_funcs[DynamicMap] = cls.compare_dynamicmap
# Option objects
cls.equality_type_funcs[Options] = cls.compare_options
cls.equality_type_funcs[Cycle] = cls.compare_cycles
return cls.equality_type_funcs
@classmethod
def compare_dictionaries(cls, d1, d2, msg='Dictionaries'):
keys= set(d1.keys())
keys2 = set(d2.keys())
symmetric_diff = keys ^ keys2
if symmetric_diff:
msg = ("Dictionaries have different sets of keys: %r\n\n"
% symmetric_diff)
msg += "Dictionary 1: %s\n" % d1
msg += "Dictionary 2: %s" % d2
raise cls.failureException(msg)
for k in keys:
cls.assertEqual(d1[k], d2[k])
@classmethod
def compare_lists(cls, l1, l2, msg=None):
try:
cls.assertEqual(len(l1), len(l2))
for v1, v2 in zip(l1, l2):
cls.assertEqual(v1, v2)
except AssertionError:
raise AssertionError(msg or '%s != %s' % (repr(l1), repr(l2)))
@classmethod
def compare_tuples(cls, t1, t2, msg=None):
try:
cls.assertEqual(len(t1), len(t2))
for i1, i2 in zip(t1, t2):
cls.assertEqual(i1, i2)
except AssertionError:
raise AssertionError(msg or '%s != %s' % (repr(t1), repr(t2)))
#=====================#
# Literal comparisons #
#=====================#
@classmethod
def compare_floats(cls, arr1, arr2, msg='Floats'):
cls.compare_arrays(arr1, arr2, msg)
@classmethod
def compare_arrays(cls, arr1, arr2, msg='Arrays'):
try:
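            # Datetime arrays are compared as integer nanoseconds; all other
            # arrays are first checked for exact equality and, failing that,
            # for approximate equality at the configured decimal precision.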
if arr1.dtype.kind == 'M':
arr1 = arr1.astype('datetime64[ns]').astype('int64')
if arr2.dtype.kind == 'M':
arr2 = arr2.astype('datetime64[ns]').astype('int64')
assert_array_equal(arr1, arr2)
except:
try:
cls.assert_array_almost_equal_fn(arr1, arr2)
except AssertionError as e:
raise cls.failureException(msg + str(e)[11:])
@classmethod
def bounds_check(cls, el1, el2, msg=None):
lbrt1 = el1.bounds.lbrt()
lbrt2 = el2.bounds.lbrt()
try:
for v1, v2 in zip(lbrt1, lbrt2):
if isinstance(v1, datetime_types):
v1 = dt_to_int(v1)
if isinstance(v2, datetime_types):
v2 = dt_to_int(v2)
cls.assert_array_almost_equal_fn(v1, v2)
except AssertionError:
raise cls.failureException("BoundingBoxes are mismatched: %s != %s."
% (el1.bounds.lbrt(), el2.bounds.lbrt()))
#=======================================#
# Dimension and Dimensioned comparisons #
#=======================================#
@classmethod
def compare_dimensions(cls, dim1, dim2, msg=None):
# 'Weak' equality semantics
if dim1.name != dim2.name:
raise cls.failureException("Dimension names mismatched: %s != %s"
% (dim1.name, dim2.name))
if dim1.label != dim2.label:
raise cls.failureException("Dimension labels mismatched: %s != %s"
% (dim1.label, dim2.label))
# 'Deep' equality of dimension metadata (all parameters)
dim1_params = dict(dim1.get_param_values())
dim2_params = dict(dim2.get_param_values())
# Special handling of deprecated 'initial' values argument
dim1_params['values'] = [] if dim1.values=='initial' else dim1.values
dim2_params['values'] = [] if dim2.values=='initial' else dim2.values
if set(dim1_params.keys()) != set(dim2_params.keys()):
raise cls.failureException("Dimension parameter sets mismatched: %s != %s"
% (set(dim1_params.keys()), set(dim2_params.keys())))
for k in dim1_params.keys():
if (dim1.params(k).__class__.__name__ == 'Callable'
and dim2.params(k).__class__.__name__ == 'Callable'):
continue
try: # This is needed as two lists are not compared by contents using ==
cls.assertEqual(dim1_params[k], dim2_params[k], msg=None)
except AssertionError as e:
msg = 'Dimension parameter %r mismatched: ' % k
raise cls.failureException("%s%s" % (msg, str(e)))
@classmethod
def compare_labelled_data(cls, obj1, obj2, msg=None):
cls.assertEqual(obj1.group, obj2.group, "Group labels mismatched.")
cls.assertEqual(obj1.label, obj2.label, "Labels mismatched.")
@classmethod
def compare_dimension_lists(cls, dlist1, dlist2, msg='Dimension lists'):
if len(dlist1) != len(dlist2):
raise cls.failureException('%s mismatched' % msg)
for d1, d2 in zip(dlist1, dlist2):
cls.assertEqual(d1, d2)
@classmethod
def compare_dimensioned(cls, obj1, obj2, msg=None):
cls.compare_labelled_data(obj1, obj2)
cls.compare_dimension_lists(obj1.vdims, obj2.vdims,
'Value dimension list')
cls.compare_dimension_lists(obj1.kdims, obj2.kdims,
'Key dimension list')
@classmethod
def compare_elements(cls, obj1, obj2, msg=None):
cls.compare_labelled_data(obj1, obj2)
cls.assertEqual(obj1.data, obj2.data)
#===============================#
# Compositional trees (+ and *) #
#===============================#
@classmethod
def compare_trees(cls, el1, el2, msg='Trees'):
if len(el1.keys()) != len(el2.keys()):
raise cls.failureException("%s have mismatched path counts." % msg)
if el1.keys() != el2.keys():
raise cls.failureException("%s have mismatched paths." % msg)
for element1, element2 in zip(el1.values(), el2.values()):
cls.assertEqual(element1, element2)
@classmethod
def compare_layouttrees(cls, el1, el2, msg=None):
cls.compare_dimensioned(el1, el2)
cls.compare_trees(el1, el2, msg='Layouts')
@classmethod
def compare_empties(cls, el1, el2, msg=None):
if not all(isinstance(el, Empty) for el in [el1, el2]):
raise cls.failureException("Compared elements are not both Empty()")
@classmethod
def compare_overlays(cls, el1, el2, msg=None):
cls.compare_dimensioned(el1, el2)
cls.compare_trees(el1, el2, msg='Overlays')
#================================#
# AttrTree and Map based classes #
#================================#
@classmethod
def compare_ndmappings(cls, el1, el2, msg='NdMappings'):
cls.compare_dimensioned(el1, el2)
if len(el1.keys()) != len(el2.keys()):
raise cls.failureException("%s have different numbers of keys." % msg)
if set(el1.keys()) != set(el2.keys()):
diff1 = [el for el in el1.keys() if el not in el2.keys()]
diff2 = [el for el in el2.keys() if el not in el1.keys()]
raise cls.failureException("%s have different sets of keys. " % msg
+ "In first, not second %s. " % diff1
+ "In second, not first: %s." % diff2)
for element1, element2 in zip(el1, el2):
cls.assertEqual(element1, element2)
@classmethod
def compare_holomap(cls, el1, el2, msg='HoloMaps'):
cls.compare_dimensioned(el1, el2)
cls.compare_ndmappings(el1, el2, msg)
@classmethod
def compare_dynamicmap(cls, el1, el2, msg='DynamicMap'):
cls.compare_dimensioned(el1, el2)
cls.compare_ndmappings(el1, el2, msg)
@classmethod
def compare_gridlayout(cls, el1, el2, msg=None):
cls.compare_dimensioned(el1, el2)
if len(el1) != len(el2):
raise cls.failureException("Layouts have different sizes.")
if set(el1.keys()) != set(el2.keys()):
raise cls.failureException("Layouts have different keys.")
for element1, element2 in zip(el1, el2):
cls.assertEqual(element1,element2)
@classmethod
def compare_ndoverlays(cls, el1, el2, msg=None):
cls.compare_dimensioned(el1, el2)
if len(el1) != len(el2):
raise cls.failureException("NdOverlays have different lengths.")
for (layer1, layer2) in zip(el1, el2):
cls.assertEqual(layer1, layer2)
@classmethod
def compare_adjointlayouts(cls, el1, el2, msg=None):
cls.compare_dimensioned(el1, el2)
        for element1, element2 in zip(el1, el2):
cls.assertEqual(element1, element2)
#=============#
# Annotations #
#=============#
@classmethod
def compare_annotation(cls, el1, el2, msg='Annotation'):
cls.compare_dimensioned(el1, el2)
cls.assertEqual(el1.data, el2.data)
@classmethod
def compare_hline(cls, el1, el2, msg='HLine'):
cls.compare_annotation(el1, el2, msg=msg)
@classmethod
def compare_vline(cls, el1, el2, msg='VLine'):
cls.compare_annotation(el1, el2, msg=msg)
@classmethod
def compare_spline(cls, el1, el2, msg='Spline'):
cls.compare_annotation(el1, el2, msg=msg)
@classmethod
def compare_arrow(cls, el1, el2, msg='Arrow'):
cls.compare_annotation(el1, el2, msg=msg)
@classmethod
def compare_text(cls, el1, el2, msg='Text'):
cls.compare_annotation(el1, el2, msg=msg)
@classmethod
def compare_div(cls, el1, el2, msg='Div'):
cls.compare_annotation(el1, el2, msg=msg)
#=======#
# Paths #
#=======#
@classmethod
def compare_paths(cls, el1, el2, msg='Path'):
cls.compare_dataset(el1, el2, msg)
paths1 = el1.split()
paths2 = el2.split()
if len(paths1) != len(paths2):
raise cls.failureException("%s objects do not have a matching number of paths." % msg)
for p1, p2 in zip(paths1, paths2):
cls.compare_dataset(p1, p2, '%s data' % msg)
@classmethod
def compare_contours(cls, el1, el2, msg='Contours'):
cls.compare_paths(el1, el2, msg=msg)
@classmethod
def compare_polygons(cls, el1, el2, msg='Polygons'):
cls.compare_paths(el1, el2, msg=msg)
@classmethod
def compare_box(cls, el1, el2, msg='Box'):
cls.compare_paths(el1, el2, msg=msg)
@classmethod
def compare_ellipse(cls, el1, el2, msg='Ellipse'):
cls.compare_paths(el1, el2, msg=msg)
@classmethod
def compare_bounds(cls, el1, el2, msg='Bounds'):
cls.compare_paths(el1, el2, msg=msg)
#========#
# Charts #
#========#
@classmethod
def compare_dataset(cls, el1, el2, msg='Dataset'):
cls.compare_dimensioned(el1, el2)
if el1.shape[0] != el2.shape[0]:
raise AssertionError("%s not of matching length." % msg)
dimension_data = [(d, el1[d], el2[d]) for d in el1.dimensions()]
for dim, d1, d2 in dimension_data:
            if d1.dtype != d2.dtype:
                raise cls.failureException(
                    "%s %s columns have different type." % (msg, dim.pprint_label)
                    + " First has type %s, and second has type %s."
                    % (d1.dtype, d2.dtype))
            if d1.dtype.kind in 'SUOV':
                if list(d1) != list(d2):
                    raise cls.failureException("%s along dimension %s not equal." %
                                               (msg, dim.pprint_label))
else:
cls.compare_arrays(d1, d2, msg)
@classmethod
def compare_curve(cls, el1, el2, msg='Curve'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_errorbars(cls, el1, el2, msg='ErrorBars'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_spread(cls, el1, el2, msg='Spread'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_area(cls, el1, el2, msg='Area'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_scatter(cls, el1, el2, msg='Scatter'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_scatter3d(cls, el1, el2, msg='Scatter3D'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_trisurface(cls, el1, el2, msg='TriSurface'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_histogram(cls, el1, el2, msg='Histogram'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_points(cls, el1, el2, msg='Points'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_vectorfield(cls, el1, el2, msg='VectorField'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_bars(cls, el1, el2, msg='Bars'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_spikes(cls, el1, el2, msg='Spikes'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_boxwhisker(cls, el1, el2, msg='BoxWhisker'):
cls.compare_dataset(el1, el2, msg)
#=========#
# Graphs #
#=========#
@classmethod
def compare_graph(cls, el1, el2, msg='Graph'):
cls.compare_dataset(el1, el2, msg)
cls.compare_nodes(el1.nodes, el2.nodes, msg)
if el1._edgepaths or el2._edgepaths:
cls.compare_edgepaths(el1.edgepaths, el2.edgepaths, msg)
@classmethod
def compare_trimesh(cls, el1, el2, msg='TriMesh'):
cls.compare_graph(el1, el2, msg)
@classmethod
def compare_nodes(cls, el1, el2, msg='Nodes'):
cls.compare_dataset(el1, el2, msg)
@classmethod
    def compare_edgepaths(cls, el1, el2, msg='EdgePaths'):
cls.compare_paths(el1, el2, msg)
#=========#
# Rasters #
#=========#
@classmethod
def compare_raster(cls, el1, el2, msg='Raster'):
cls.compare_dimensioned(el1, el2)
cls.compare_arrays(el1.data, el2.data, msg)
@classmethod
def compare_quadmesh(cls, el1, el2, msg='QuadMesh'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_heatmap(cls, el1, el2, msg='HeatMap'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_image(cls, el1, el2, msg='Image'):
cls.bounds_check(el1,el2)
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_rgb(cls, el1, el2, msg='RGB'):
cls.bounds_check(el1,el2)
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_hsv(cls, el1, el2, msg='HSV'):
cls.bounds_check(el1,el2)
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_surface(cls, el1, el2, msg='Surface'):
cls.bounds_check(el1,el2)
cls.compare_dataset(el1, el2, msg)
#========#
# Tables #
#========#
@classmethod
def compare_itemtables(cls, el1, el2, msg=None):
cls.compare_dimensioned(el1, el2)
if el1.rows != el2.rows:
raise cls.failureException("ItemTables have different numbers of rows.")
if el1.cols != el2.cols:
raise cls.failureException("ItemTables have different numbers of columns.")
if [d.name for d in el1.vdims] != [d.name for d in el2.vdims]:
raise cls.failureException("ItemTables have different Dimensions.")
@classmethod
def compare_tables(cls, el1, el2, msg='Table'):
cls.compare_dataset(el1, el2, msg)
#========#
# Pandas #
#========#
@classmethod
def compare_dataframe(cls, df1, df2, msg='DFrame'):
from pandas.util.testing import assert_frame_equal
try:
assert_frame_equal(df1, df2)
except AssertionError as e:
raise cls.failureException(msg+': '+str(e))
#============#
# Statistics #
#============#
@classmethod
def compare_distribution(cls, el1, el2, msg='Distribution'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_bivariate(cls, el1, el2, msg='Bivariate'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_hextiles(cls, el1, el2, msg='HexTiles'):
cls.compare_dataset(el1, el2, msg)
#=======#
# Grids #
#=======#
@classmethod
def _compare_grids(cls, el1, el2, name):
if len(el1.keys()) != len(el2.keys()):
raise cls.failureException("%ss have different numbers of items." % name)
if set(el1.keys()) != set(el2.keys()):
raise cls.failureException("%ss have different keys." % name)
if len(el1) != len(el2):
raise cls.failureException("%ss have different depths." % name)
for element1, element2 in zip(el1, el2):
cls.assertEqual(element1, element2)
@classmethod
def compare_grids(cls, el1, el2, msg=None):
cls.compare_dimensioned(el1, el2)
cls._compare_grids(el1, el2, 'GridSpace')
#=========#
# Options #
#=========#
@classmethod
def compare_options(cls, options1, options2, msg=None):
cls.assertEqual(options1.kwargs, options2.kwargs)
@classmethod
def compare_cycles(cls, cycle1, cycle2, msg=None):
cls.assertEqual(cycle1.values, cycle2.values)
@classmethod
def compare_channelopts(cls, opt1, opt2, msg=None):
cls.assertEqual(opt1.mode, opt2.mode)
cls.assertEqual(opt1.pattern, opt2.pattern)
class ComparisonTestCase(Comparison, TestCase):
"""
Class to integrate the Comparison class with unittest.TestCase.
"""
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
registry = Comparison.register()
for k, v in registry.items():
self.addTypeEqualityFunc(k, v)
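# Usage sketch (hypothetical test and element names): tests subclass
# ComparisonTestCase so that assertEqual dispatches to the registered
# compare_* methods above.
#
#   class ElementEqualityTests(ComparisonTestCase):
#       def test_identical_curves_compare_equal(self):
#           curve = Curve([1, 2, 3])          # any registered element type
#           self.assertEqual(curve, Curve([1, 2, 3]))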
| bsd-3-clause |
abhishekgahlot/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
i.e. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
cangermueller/deepcpg | scripts/dcpg_train.py | 1 | 28511 | #!/usr/bin/env python
"""Train a DeepCpG model to predict DNA methylation.
Trains a DeepCpG model on DNA (DNA model), neighboring methylation states
(CpG model), or both (Joint model) to predict CpG methylation of multiple cells.
Allows to fine-tune individual models or to train them from scratch.
Examples
--------
Train a DNA model on chromosome 1, 3, and 5, and use chromosome 13, 14, and
15 for validation:
.. code:: bash
dcpg_train.py
./data/c{1,3,5}_*.h5
--val_files ./data/c{13,14,15}_*.h5
--dna_model CnnL2h128
--out_dir ./models/dna
Train a CpG model:
.. code:: bash
dcpg_train.py
./data/c{1,3,5}_*.h5
--val_files ./data/c{13,14,15}_*.h5
--cpg_model RnnL1
--out_dir ./models/cpg
Train a Joint model using a pre-trained DNA and CpG model:
.. code:: bash
dcpg_train.py
./data/c{1,3,5}_*.h5
--val_files ./data/c{13,14,15}_*.h5
--dna_model ./models/dna
--cpg_model ./models/cpg
--joint_model JointL2h512
--train_models joint
--out_dir ./models/joint
See Also
--------
* ``dcpg_eval.py``: For evaluating a trained model and imputing methylation
profiles.
"""
from __future__ import print_function
from __future__ import division
from collections import OrderedDict
import os
import random
import re
import sys
import argparse
import h5py as h5
import logging
import numpy as np
import pandas as pd
import six
from six.moves import range
from keras import callbacks as kcbk
from keras.models import Model
from keras.optimizers import Adam
from deepcpg import callbacks as cbk
from deepcpg import data as dat
from deepcpg import metrics as met
from deepcpg import models as mod
from deepcpg.models.utils import is_input_layer, is_output_layer
from deepcpg.data import hdf, OUTPUT_SEP
from deepcpg.utils import format_table, make_dir, EPS
LOG_PRECISION = 4
CLA_METRICS = [met.acc]
REG_METRICS = [met.mse, met.mae]
def remove_outputs(model):
while is_output_layer(model.layers[-1], model):
model.layers.pop()
model.outputs = [model.layers[-1].output]
model.layers[-1].outbound_nodes = []
model.output_names = None
def rename_layers(model, scope=None):
if not scope:
scope = model.scope
for layer in model.layers:
if is_input_layer(layer) or layer.name.startswith(scope):
continue
layer.name = '%s/%s' % (scope, layer.name)
def get_output_stats(output):
stats = OrderedDict()
output = np.ma.masked_values(output, dat.CPG_NAN)
stats['nb_tot'] = len(output)
stats['nb_obs'] = np.sum(output != dat.CPG_NAN)
stats['frac_obs'] = stats['nb_obs'] / stats['nb_tot']
stats['mean'] = float(np.mean(output))
stats['var'] = float(np.var(output))
return stats
def get_output_weights(output_names, weight_patterns):
regex_weights = dict()
for weight_pattern in weight_patterns:
tmp = [tmp.strip() for tmp in weight_pattern.split('=')]
if len(tmp) != 2:
raise ValueError('Invalid weight pattern "%s"!' % (weight_pattern))
regex_weights[tmp[0]] = float(tmp[1])
output_weights = dict()
for output_name in output_names:
for regex, weight in six.iteritems(regex_weights):
if re.match(regex, output_name):
output_weights[output_name] = weight
if output_name not in output_weights:
output_weights[output_name] = 1.0
return output_weights
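# Illustrative sketch (hypothetical output names): each weight pattern is a
# `regex=weight` pair; outputs that match no pattern default to 1.0.
#
#   get_output_weights(['cpg/cellA', 'cpg/cellB', 'bulk/mean'], ['cpg/.*=2'])
#   # -> {'cpg/cellA': 2.0, 'cpg/cellB': 2.0, 'bulk/mean': 1.0}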
def get_class_weights(labels, nb_class=None):
freq = np.bincount(labels) / len(labels)
if nb_class is None:
nb_class = len(freq)
if len(freq) < nb_class:
tmp = np.zeros(nb_class, dtype=freq.dtype)
tmp[:len(freq)] = freq
freq = tmp
weights = 1 / (freq + EPS)
weights /= weights.sum()
return weights
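# Illustrative sketch: inverse-frequency weighting on a toy label vector.
#
#   get_class_weights(np.array([0, 0, 0, 1]), nb_class=2)
#   # frequencies [0.75, 0.25] -> normalized inverse weights of about [0.25, 0.75]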
def get_output_class_weights(output_name, output):
output = output[output != dat.CPG_NAN]
_output_name = output_name.split(OUTPUT_SEP)
if _output_name[0] == 'cpg':
weights = get_class_weights(output, 2)
elif _output_name[-1] == 'cat_var':
weights = get_class_weights(output, 3)
elif _output_name[-1] in ['cat2_var', 'diff', 'mode']:
weights = get_class_weights(output, 2)
else:
return None
weights = OrderedDict(zip(range(len(weights)), weights))
return weights
def perf_logs_str(logs):
t = logs.to_csv(None, sep='\t', float_format='%.4f', index=False)
return t
def get_metrics(output_name):
_output_name = output_name.split(OUTPUT_SEP)
if _output_name[0] == 'cpg':
metrics = CLA_METRICS
elif _output_name[0] == 'bulk':
metrics = REG_METRICS + CLA_METRICS
elif _output_name[-1] in ['diff', 'mode', 'cat2_var']:
metrics = CLA_METRICS
elif _output_name[-1] == 'mean':
metrics = REG_METRICS + CLA_METRICS
elif _output_name[-1] == 'var':
metrics = REG_METRICS
elif _output_name[-1] == 'cat_var':
metrics = [met.cat_acc]
else:
raise ValueError('Invalid output name "%s"!' % output_name)
return metrics
class App(object):
def run(self, args):
name = os.path.basename(args[0])
parser = self.create_parser(name)
opts = parser.parse_args(args[1:])
return self.main(name, opts)
def create_parser(self, name):
p = argparse.ArgumentParser(
prog=name,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Trains model on DNA (DNA model), neighboring '
'methylation states (CpG model), or both (Joint model) to predict '
'CpG methylation of multiple cells.')
# IO
g = p.add_argument_group('input-output arguments')
g.add_argument(
'train_files',
nargs='+',
help='Training data files')
g.add_argument(
'--val_files',
nargs='+',
help='Validation data files')
g.add_argument(
'-o', '--out_dir',
default='./train',
help='Output directory')
g = p.add_argument_group('arguments to define the model architecture')
models = sorted(list(mod.dna.list_models().keys()))
g.add_argument(
'--dna_model',
help='Name of DNA model or files of existing model.'
' Available models: %s' % ', '.join(models),
nargs='+')
g.add_argument(
'--dna_wlen',
help='DNA window length',
type=int)
models = sorted(list(mod.cpg.list_models().keys()))
g.add_argument(
'--cpg_model',
help='Name of CpG model or files of existing model.'
' Available models: %s' % ', '.join(models),
nargs='+')
g.add_argument(
'--cpg_wlen',
help='CpG window length',
type=int)
models = sorted(list(mod.joint.list_models().keys()))
g.add_argument(
'--joint_model',
help='Name of Joint model.'
' Available models: %s' % ', '.join(models),
default='JointL2h512')
g.add_argument(
'--model_files',
help='Files of existing model',
nargs='+')
g = p.add_argument_group('arguments to define which model components '
'are trained')
g.add_argument(
'--fine_tune',
help='Only train output layers',
action='store_true')
g.add_argument(
'--train_models',
help='Only train the specified models',
choices=['dna', 'cpg', 'joint'],
nargs='+')
g.add_argument(
'--trainable',
help='Regex of layers that should be trained',
nargs='+')
g.add_argument(
'--not_trainable',
help='Regex of layers that should not be trained',
nargs='+')
g.add_argument(
'--freeze_filter',
help='Exclude filter weights of first convolutional layer from '
'training',
action='store_true')
g.add_argument(
'--filter_weights',
help='HDF5 file with weights to be used for initializing filters',
nargs='+')
g = p.add_argument_group('training arguments')
g.add_argument(
'--learning_rate',
help='Learning rate',
type=float,
default=0.0001)
g.add_argument(
'--learning_rate_decay',
help='Exponential learning rate decay factor',
type=float,
default=0.975)
g.add_argument(
'--nb_epoch',
help='Maximum # training epochs',
type=int,
default=30)
g.add_argument(
'--nb_train_sample',
help='Maximum # training samples',
type=int)
g.add_argument(
'--nb_val_sample',
help='Maximum # validation samples',
type=int)
g.add_argument(
'--batch_size',
help='Batch size',
type=int,
default=128)
g.add_argument(
'--early_stopping',
help='Early stopping patience',
type=int,
default=5)
g.add_argument(
'--dropout',
help='Dropout rate',
type=float,
default=0.0)
g.add_argument(
'--l1_decay',
help='L1 weight decay',
type=float,
default=0.0001)
g.add_argument(
'--l2_decay',
help='L2 weight decay',
type=float,
default=0.0001)
g.add_argument(
'--no_tensorboard',
help='Do not store Tensorboard summaries',
action='store_true')
g = p.add_argument_group('arguments to select outputs and weights')
g.add_argument(
'--output_names',
help='Regex to select outputs',
nargs='+',
default=['cpg/.*'])
g.add_argument(
'--nb_output',
type=int,
help='Maximum number of outputs')
g.add_argument(
'--no_class_weights',
help='Do not weight classes',
action='store_true')
g.add_argument(
'--output_weights',
help='Output weights defined as a list of `output`=`weight` '
'patterns, where `output` is a regex of output names, and '
'`weight` the weight that is assigned to them',
nargs='+')
g.add_argument(
'--replicate_names',
help='Regex to select replicates',
nargs='+')
g.add_argument(
'--nb_replicate',
type=int,
help='Maximum number of replicates')
g = p.add_argument_group('advanced arguments')
g.add_argument(
'--max_time',
help='Maximum training time in hours',
type=float)
g.add_argument(
'--stop_file',
help='File that terminates training if it exists')
g.add_argument(
'--seed',
help='Seed of random number generator',
type=int,
default=0)
g.add_argument(
'--no_log_outputs',
help='Do not log performance metrics of individual outputs',
action='store_true')
g.add_argument(
'--verbose',
help='More detailed log messages',
action='store_true')
g.add_argument(
'--log_file',
help='Write log messages to file')
g.add_argument(
'--data_q_size',
help='Size of data generator queue',
type=int,
default=10)
g.add_argument(
'--data_nb_worker',
help='Number of worker for data generator queue',
type=int,
default=1)
return p
def get_callbacks(self):
opts = self.opts
callbacks = []
if opts.val_files:
callbacks.append(kcbk.EarlyStopping(
'val_loss' if opts.val_files else 'loss',
patience=opts.early_stopping,
verbose=1
))
callbacks.append(kcbk.ModelCheckpoint(
os.path.join(opts.out_dir, 'model_weights_train.h5'),
save_best_only=False))
monitor = 'val_loss' if opts.val_files else 'loss'
callbacks.append(kcbk.ModelCheckpoint(
os.path.join(opts.out_dir, 'model_weights_val.h5'),
monitor=monitor,
save_best_only=True, verbose=1
))
max_time = int(opts.max_time * 3600) if opts.max_time else None
callbacks.append(cbk.TrainingStopper(
max_time=max_time,
stop_file=opts.stop_file,
verbose=1
))
def learning_rate_schedule(epoch):
lr = opts.learning_rate * opts.learning_rate_decay**epoch
print('Learning rate: %.3g' % lr)
return lr
callbacks.append(kcbk.LearningRateScheduler(learning_rate_schedule))
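        # For example, with the defaults --learning_rate 0.0001 and
        # --learning_rate_decay 0.975, epoch 10 uses
        # 0.0001 * 0.975**10, i.e. about 7.76e-05.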
def save_lc(epoch, epoch_logs, val_epoch_logs):
logs = {'lc_train.tsv': epoch_logs,
'lc_val.tsv': val_epoch_logs}
for name, logs in six.iteritems(logs):
if not logs:
continue
logs = pd.DataFrame(logs)
with open(os.path.join(opts.out_dir, name), 'w') as f:
f.write(perf_logs_str(logs))
metrics = OrderedDict()
for metric_funs in six.itervalues(self.metrics):
for metric_fun in metric_funs:
metrics[metric_fun.__name__] = True
metrics = ['loss'] + list(metrics.keys())
self.perf_logger = cbk.PerformanceLogger(
callbacks=[save_lc],
metrics=metrics,
precision=LOG_PRECISION,
verbose=not opts.no_log_outputs
)
callbacks.append(self.perf_logger)
if not opts.no_tensorboard:
callbacks.append(kcbk.TensorBoard(
log_dir=opts.out_dir,
histogram_freq=0,
write_graph=True,
write_images=True
))
return callbacks
def print_output_stats(self, output_stats):
table = OrderedDict()
for name, stats in six.iteritems(output_stats):
table.setdefault('name', []).append(name)
for key in stats:
table.setdefault(key, []).append(stats[key])
print('Output statistics:')
print(format_table(table))
print()
def print_class_weights(self, class_weights):
table = OrderedDict()
for name, class_weight in six.iteritems(class_weights):
if not class_weight:
continue
column = []
for cla, weight in six.iteritems(class_weight):
column.append('%s=%.2f' % (cla, weight))
table[name] = column
if table:
print('Class weights:')
print(format_table(table))
print()
def build_dna_model(self):
opts = self.opts
log = self.log
if os.path.exists(opts.dna_model[0]):
log.info('Loading existing DNA model ...')
dna_model = mod.load_model(opts.dna_model, log=log.info)
remove_outputs(dna_model)
rename_layers(dna_model, 'dna')
else:
log.info('Building DNA model ...')
dna_model_builder = mod.dna.get(opts.dna_model[0])(
l1_decay=opts.l1_decay,
l2_decay=opts.l2_decay,
dropout=opts.dropout)
dna_wlen = dat.get_dna_wlen(opts.train_files[0], opts.dna_wlen)
dna_inputs = dna_model_builder.inputs(dna_wlen)
dna_model = dna_model_builder(dna_inputs)
return dna_model
def build_cpg_model(self):
opts = self.opts
log = self.log
replicate_names = dat.get_replicate_names(
opts.train_files[0],
regex=opts.replicate_names,
nb_key=opts.nb_replicate)
if not replicate_names:
raise ValueError('No replicates found!')
print('Replicate names:')
print(', '.join(replicate_names))
print()
cpg_wlen = dat.get_cpg_wlen(opts.train_files[0], opts.cpg_wlen)
if os.path.exists(opts.cpg_model[0]):
log.info('Loading existing CpG model ...')
src_cpg_model = mod.load_model(opts.cpg_model, log=log.info)
remove_outputs(src_cpg_model)
rename_layers(src_cpg_model, 'cpg')
nb_replicate = src_cpg_model.input_shape[0][1]
if nb_replicate != len(replicate_names):
                tmp = ('CpG model was trained with %d replicates but %d '
                       'replicates were provided. Copying weights to the '
                       'new model ...')
                log.info(tmp % (nb_replicate, len(replicate_names)))
cpg_model_builder = mod.cpg.get(src_cpg_model.name)(
l1_decay=opts.l1_decay,
l2_decay=opts.l2_decay,
dropout=opts.dropout)
cpg_inputs = cpg_model_builder.inputs(cpg_wlen, replicate_names)
cpg_model = cpg_model_builder(cpg_inputs)
mod.copy_weights(src_cpg_model, cpg_model)
else:
cpg_model = src_cpg_model
else:
log.info('Building CpG model ...')
cpg_model_builder = mod.cpg.get(opts.cpg_model[0])(
l1_decay=opts.l1_decay,
l2_decay=opts.l2_decay,
dropout=opts.dropout)
cpg_inputs = cpg_model_builder.inputs(cpg_wlen, replicate_names)
cpg_model = cpg_model_builder(cpg_inputs)
return cpg_model
def build_model(self):
opts = self.opts
log = self.log
output_names = dat.get_output_names(opts.train_files[0],
regex=opts.output_names,
nb_key=opts.nb_output)
if not output_names:
raise ValueError('No outputs found!')
dna_model = None
if opts.dna_model:
dna_model = self.build_dna_model()
cpg_model = None
if opts.cpg_model:
cpg_model = self.build_cpg_model()
if dna_model is not None and cpg_model is not None:
log.info('Joining models ...')
joint_model_builder = mod.joint.get(opts.joint_model)(
l1_decay=opts.l1_decay,
l2_decay=opts.l2_decay,
dropout=opts.dropout)
stem = joint_model_builder([dna_model, cpg_model])
stem.name = '_'.join([stem.name, dna_model.name, cpg_model.name])
elif dna_model is not None:
stem = dna_model
elif cpg_model is not None:
stem = cpg_model
else:
log.info('Loading existing model ...')
stem = mod.load_model(opts.model_files, log=log.info)
if sorted(output_names) == sorted(stem.output_names):
return stem
log.info('Removing existing output layers ...')
remove_outputs(stem)
outputs = mod.add_output_layers(stem.outputs[0], output_names)
model = Model(inputs=stem.inputs, outputs=outputs, name=stem.name)
return model
def set_trainability(self, model):
opts = self.opts
trainable = []
not_trainable = []
if opts.fine_tune:
not_trainable.append('.*')
elif opts.train_models:
not_trainable.append('.*')
for name in opts.train_models:
trainable.append('%s/' % name)
if opts.freeze_filter:
not_trainable.append(mod.get_first_conv_layer(model.layers).name)
if not trainable and opts.trainable:
trainable = opts.trainable
if not not_trainable and opts.not_trainable:
not_trainable = opts.not_trainable
if not trainable and not not_trainable:
return
table = OrderedDict()
table['layer'] = []
table['trainable'] = []
for layer in model.layers:
if is_input_layer(layer) or is_output_layer(layer, model):
continue
if not hasattr(layer, 'trainable'):
continue
for regex in not_trainable:
if re.match(regex, layer.name):
layer.trainable = False
for regex in trainable:
if re.match(regex, layer.name):
layer.trainable = True
table['layer'].append(layer.name)
table['trainable'].append(layer.trainable)
print('Layer trainability:')
print(format_table(table))
print()
def init_filter_weights(self, filename, conv_layer):
h5_file = h5.File(filename[0], 'r')
group = h5_file
if len(filename) > 1:
group = h5_file[filename[1]]
weights = group['weights'].value
bias = None
if 'bias' in group:
bias = group['bias'].value
h5_file.close()
assert weights.ndim == 4
if weights.shape[1] != 1:
weights = weights[:, :, :, 0]
weights = np.swapaxes(weights, 0, 2)
weights = np.expand_dims(weights, 1)
# filter_size x 1 x 4 x nb_filter
cur_weights, cur_bias = conv_layer.get_weights()
# Adapt number of filters
tmp = min(weights.shape[-1], cur_weights.shape[-1])
weights = weights[:, :, :, :tmp]
# Adapt filter size
if len(weights) > len(cur_weights):
# Truncate weights
idx = (len(weights) - len(cur_weights)) // 2
weights = weights[idx:(idx + len(cur_weights))]
elif len(weights) < len(cur_weights):
# Pad weights
shape = [len(cur_weights)] + list(weights.shape[1:])
pad_weights = np.random.uniform(0, 1, shape) * 1e-2
idx = (len(cur_weights) - len(weights)) // 2
pad_weights[idx:(idx + len(weights))] = weights
weights = pad_weights
assert np.all(weights.shape[:-1] == cur_weights.shape[:-1])
cur_weights[:, :, :, :weights.shape[-1]] = weights
if bias is not None:
bias = bias[:len(cur_bias)]
cur_bias[:len(bias)] = bias
conv_layer.set_weights((cur_weights, cur_bias))
print('%d filters initialized' % weights.shape[-1])
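    # Sketch of the weight file this method expects (file and group names are
    # hypothetical): a 4-D 'weights' dataset and an optional 'bias' dataset,
    # either at the HDF5 root or inside a group named by the second
    # --filter_weights argument.
    #
    #   with h5.File('filters.h5', 'w') as f:
    #       g = f.create_group('motifs')
    #       g['weights'] = filter_matrix   # 4-D array, layout per the code above
    #       g['bias'] = filter_bias        # optional 1-D array
    #   # invoked as: --filter_weights filters.h5 motifs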
def main(self, name, opts):
logging.basicConfig(filename=opts.log_file,
format='%(levelname)s (%(asctime)s): %(message)s')
log = logging.getLogger(name)
if opts.verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
if opts.seed is not None:
np.random.seed(opts.seed)
random.seed(opts.seed)
self.log = log
self.opts = opts
make_dir(opts.out_dir)
log.info('Building model ...')
model = self.build_model()
model.summary()
self.set_trainability(model)
if opts.filter_weights:
conv_layer = mod.get_first_conv_layer(model.layers)
log.info('Initializing filters of %s ...' % conv_layer.name)
self.init_filter_weights(opts.filter_weights, conv_layer)
mod.save_model(model, os.path.join(opts.out_dir, 'model.json'))
log.info('Computing output statistics ...')
output_names = model.output_names
output_stats = OrderedDict()
if opts.no_class_weights:
class_weights = None
else:
class_weights = OrderedDict()
for name in output_names:
output = hdf.read(opts.train_files, 'outputs/%s' % name,
nb_sample=opts.nb_train_sample)
output = list(output.values())[0]
output_stats[name] = get_output_stats(output)
if class_weights is not None:
class_weights[name] = get_output_class_weights(name, output)
self.print_output_stats(output_stats)
if class_weights:
self.print_class_weights(class_weights)
output_weights = None
if opts.output_weights:
log.info('Initializing output weights ...')
output_weights = get_output_weights(output_names,
opts.output_weights)
print('Output weights:')
for output_name in output_names:
if output_name in output_weights:
print('%s: %.2f' % (output_name,
output_weights[output_name]))
print()
self.metrics = dict()
for output_name in output_names:
self.metrics[output_name] = get_metrics(output_name)
optimizer = Adam(lr=opts.learning_rate)
model.compile(optimizer=optimizer,
loss=mod.get_objectives(output_names),
loss_weights=output_weights,
metrics=self.metrics)
log.info('Loading data ...')
replicate_names = dat.get_replicate_names(
opts.train_files[0],
regex=opts.replicate_names,
nb_key=opts.nb_replicate)
data_reader = mod.data_reader_from_model(
model, replicate_names=replicate_names)
nb_train_sample = dat.get_nb_sample(opts.train_files,
opts.nb_train_sample)
train_data = data_reader(opts.train_files,
class_weights=class_weights,
batch_size=opts.batch_size,
nb_sample=nb_train_sample,
shuffle=True,
loop=True)
if opts.val_files:
nb_val_sample = dat.get_nb_sample(opts.val_files,
opts.nb_val_sample)
val_data = data_reader(opts.val_files,
batch_size=opts.batch_size,
nb_sample=nb_val_sample,
shuffle=False,
loop=True)
else:
val_data = None
nb_val_sample = None
log.info('Initializing callbacks ...')
callbacks = self.get_callbacks()
log.info('Training model ...')
print()
print('Training samples: %d' % nb_train_sample)
if nb_val_sample:
print('Validation samples: %d' % nb_val_sample)
model.fit_generator(
train_data,
steps_per_epoch=nb_train_sample // opts.batch_size,
epochs=opts.nb_epoch,
callbacks=callbacks,
validation_data=val_data,
validation_steps=nb_val_sample // opts.batch_size,
max_queue_size=opts.data_q_size,
workers=opts.data_nb_worker,
verbose=0)
print('\nTraining set performance:')
print(format_table(self.perf_logger.epoch_logs,
precision=LOG_PRECISION))
if self.perf_logger.val_epoch_logs:
print('\nValidation set performance:')
print(format_table(self.perf_logger.val_epoch_logs,
precision=LOG_PRECISION))
# Restore model with highest validation performance
filename = os.path.join(opts.out_dir, 'model_weights_val.h5')
if os.path.isfile(filename):
model.load_weights(filename)
# Delete metrics since they cause problems when loading the model
# from HDF5 file. Metrics can be loaded from json + weights file.
model.metrics = None
model.metrics_names = None
model.metrics_tensors = None
model.save(os.path.join(opts.out_dir, 'model.h5'))
log.info('Done!')
return 0
if __name__ == '__main__':
app = App()
app.run(sys.argv)
| mit |
eickenberg/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squared function. The penalising `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
dkriegner/xrayutilities | examples/simpack_xrr_diffuse.py | 1 | 2087 | # This file is part of xrayutilities.
#
# xrayutilities is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2018 Dominik Kriegner <[email protected]>
import time
import xrayutilities as xu
from matplotlib.pylab import *
sub = xu.simpack.Layer(xu.materials.Si, inf, roughness=1, lat_correl=100)
lay1 = xu.simpack.Layer(xu.materials.Si, 200, roughness=1, lat_correl=200)
lay2 = xu.simpack.Layer(xu.materials.Ge, 70, roughness=3, lat_correl=50)
ls = xu.simpack.LayerStack('SL 5', sub+5*(lay2+lay1))
alphai = arange(0.17, 2, 0.001)
print("calculate method=1, H=1, vert=0")
start = time.time()
m = xu.simpack.DiffuseReflectivityModel(ls, sample_width=10, beam_width=1,
energy='CuKa1', vert_correl=1000,
vert_nu=0, H=1, method=1, vert_int=0)
d1 = m.simulate(alphai)
print("elapsed time: %.4f" % (time.time() - start))
print("calculate method=2, H=1, vert=0")
start = time.time()
m = xu.simpack.DiffuseReflectivityModel(ls, sample_width=10, beam_width=1,
energy='CuKa1', vert_correl=1000,
vert_nu=0, H=1, method=2, vert_int=0)
d2 = m.simulate(alphai)
print("elapsed time: %.4f" % (time.time() - start))
figure()
semilogy(alphai, d1, label='method=1')
semilogy(alphai, d2, label='method=2')
legend()
xlabel('incidence angle (deg)')
ylabel('intensity (arb. u.)')
tight_layout()
| gpl-2.0 |
UManPychron/pychron | pychron/core/ui/qt/map_editor.py | 2 | 4675 | # ===============================================================================
# Copyright 2019 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import smopy
from PIL import Image
from pyface.qt.QtGui import QPainter, QFrame, QPixmap, QImage
from traitsui.qt4.basic_editor_factory import BasicEditorFactory
from traitsui.qt4.editor import Editor
class QMapWidget(QFrame):
def __init__(self, *args, **kw):
super(QMapWidget, self).__init__(*args, **kw)
self._pix_maps = []
# self._pix_map = QPixmap(self.width(), self.height())
# self._pix_map.fill(Qt.transparent)
def set_tile(self, image):
data = image.tobytes('raw', 'RGB')
im = QImage(data, image.size[0], image.size[1], QImage.Format_RGB888)
pix = QPixmap.fromImage(im)
self._pix_maps.append(pix)
# print(self._pix_map)
self.update()
#
# def resizeEvent(self, event):
# print('asdf', event.size().width())
# esize = event.size()
# csize = self._pix_map.size()
#
# try:
#
# print(csize.width(), esize.width(), csize.height(), esize.height())
# self._pix_map = self._pix_map.scaled(csize.width()/esize.width(),
# csize.height()/esize.height(),)
# except ZeroDivisionError:
# pass
#
# # self.update()
# self.repaint()
def paintEvent(self, event):
super(QMapWidget, self).paintEvent(event)
qp = QPainter()
qp.begin(self)
for p in self._pix_maps:
qp.drawPixmap(0, 0, p)
qp.end()
# def set_screen(self):
# self._screen = QPixMap()
class _MapEditor(Editor):
def init(self, parent):
self.control = self._create_control(parent)
def update_editor(self):
if self.control:
self.control.update()
# def set_size_policy(self, direction, resizable, springy, stretch):
# pass
def _create_control(self, parent):
control = QMapWidget()
# control.setMaximumSize(200,200)
lat_min = 34.052999
lon_min = -106.924551
lat_max = 34.076752
lon_max = -106.885971
#
# lat_min = 34.052999
# lon_min = -106.81
# lat_max = 34.08
# lon_max = -106.83
rect = (lat_min, lon_min, lat_max, lon_max)
server = 'https://tiles.wmflabs.org/bw-mapnik/{z}/{x}/{y}.png'
server = 'http://c.tile.stamen.com/watercolor/{z}/{x}/{y}.png'
        # server = 'https://c.tiles.wmflabs.org/hillshading/{z}/{x}/{y}.png'
# server = 'https://mt1.google.com/vt/lyrs=y&x={x}&y={y}&z={z}' # satelite
# server = 'https://mt1.google.com/vt/lyrs=t&x={x}&y={y}&z={z}' # terrain
# server = 'https://mt1.google.com/vt/lyrs=r&x={x}&y={y}&z={z}' # maps
smopy.TILE_SERVER = server
m = smopy.Map(rect, z=10)
# m = smopy.Map(rect)
# m = smopy.Map(lat_min, lon_min, z=10, tileserver='http://c.tile.stamen.com/watercolor/{z}/{x}/{y}.png')
# m.show_ipython()
# control.set_tile(m.img)
base = m.img
base = base.convert('RGBA')
base.putalpha(200)
control.set_tile(base)
# return control
server = 'https://tiles.wmflabs.org/bw-mapnik/{z}/{x}/{y}.png'
# server = 'https://mt1.google.com/vt/lyrs=y&x={x}&y={y}&z={z}' # satelite
smopy.TILE_SERVER = server
m = smopy.Map(rect, z=10)
# img = m.img
# img = img.convert('RGBA')
# img.putalpha(128)
# img = img.convert('RGB')
img = m.img.convert('RGBA')
img.putalpha(128)
img = Image.alpha_composite(base, img)
control.set_tile(img)
# control.set_tile(Image.blend(base, l1, 129))
# m.show_mpl()
# from matplotlib.pyplot import show
# show()
# control.set_screen()
return control
class MapViewEditor(BasicEditorFactory):
klass = _MapEditor
# ============= EOF =============================================
| apache-2.0 |
ebernhardson/l2r | code/task.py | 1 | 9415 | # -*- coding: utf-8 -*-
"""
@author: Chenglong Chen <[email protected]>
@brief: definitions for
- learner & ensemble learner
- feature & stacking feature
- task & stacking task
- task optimizer
"""
import os
import sys
import time
from optparse import OptionParser
from itertools import chain
import numpy as np
import pandas as pd
from scipy.stats import kendalltau
from sklearn.model_selection import GroupKFold
from sklearn.linear_model import Lasso, Ridge, BayesianRidge
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor, GradientBoostingRegressor
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials, space_eval
from rankpy.metrics._metrics import KendallTau
import config
from utils import dist_utils, logging_utils, table_utils, time_utils
from utils.xgb_utils import XGBRank
from utils.rankpy_utils import LambdaMART
try:
from utils.keras_utils import KerasDNNRegressor
except:
pass
from model_param_space import ModelParamSpace
class Learner:
def __init__(self, learner_name, param_dict):
self.learner_name = learner_name
self.param_dict = param_dict
self.learner = self._get_learner()
def __str__(self):
return self.learner_name
def _get_learner(self):
# xgboost
if self.learner_name == "reg_xgb_rank":
return XGBRank(**self.param_dict)
# ranky
if self.learner_name == "reg_rankpy_lambdamart":
return LambdaMART(**self.param_dict)
# TODO: https://sourceforge.net/p/lemur/wiki/RankLib%20How%20to%20use/
# TODO: http://www.cs.cornell.edu/people/tj/svm_light/svm_rank.html
return None
def fit(self, X, y, ids, weight=None, feature_names=None):
if feature_names is not None:
self.learner.fit(X, y, ids, weight, feature_names)
else:
self.learner.fit(X, y, ids, weight)
return self
def predict(self, X, ids, weight=None, feature_names=None):
if feature_names is not None:
y_pred = self.learner.predict(X, ids, weight, feature_names)
else:
y_pred = self.learner.predict(X, ids, weight)
return y_pred
def plot_importance(self):
ax = self.learner.plot_importance()
return ax
class Feature:
def __init__(self, feature_name):
self.feature_name = feature_name
self.data_dict = self._load_data_dict()
def __str__(self):
return self.feature_name
def _load_data_dict(self):
fname = os.path.join(config.FEAT_DIR+"/Combine", self.feature_name+config.FEAT_FILE_SUFFIX)
data_dict = table_utils._read(fname)
return data_dict
## for refit
def _get_train_test_data(self):
# feature
X = self.data_dict["X"]
y = self.data_dict["y"]
w = self.data_dict["weights"]
ids = self.data_dict["query_ids"]
# split into train/test. Leaves out 30% of queries.
# TODO: Use all 3 splits for CV.
splitter = GroupKFold(n_splits=3)
train, test = next(splitter.split(X, None, ids))
# TODO: split in feature_combiner
        X_train, X_test, y_train, y_test, w_train, w_test, ids_train, ids_test = chain.from_iterable(
            ((a[train], a[test]) for a in [X, y, w, ids]))
        return X_train, y_train, w_train, ids_train, X_test, y_test, w_test, ids_test
## for feature importance
def _get_feature_names(self):
return self.data_dict["feature_names"]
class Task:
def __init__(self, learner, feature, suffix, logger, verbose=True, plot_importance=False):
self.learner = learner
self.feature = feature
self.suffix = suffix
self.logger = logger
self.verbose = verbose
self.plot_importance = plot_importance
self.r2 = 0
def __str__(self):
return "[Feat@%s]_[Learner@%s]%s"%(str(self.feature), str(self.learner), str(self.suffix))
def _print_param_dict(self, d, prefix=" ", incr_prefix=" "):
for k,v in sorted(d.items()):
if isinstance(v, dict):
self.logger.info("%s%s:" % (prefix,k))
self._print_param_dict(v, prefix+incr_prefix, incr_prefix)
else:
self.logger.info("%s%s: %s" % (prefix,k,v))
def fit(self):
X_train, y_train, w_train, ids_train, X_test, y_test, w_test, ids_test = self.feature._get_train_test_data()
if self.plot_importance:
feature_names = self.feature._get_feature_names()
self.learner.fit(X_train, y_train, ids_train, w_train, feature_names)
y_pred = self.learner.predict(X_test, ids_test, w_test, feature_names)
else:
self.learner.fit(X_train, y_train, ids_train, w_train)
y_pred = self.learner.predict(X_test, ids_test, w_test)
# Compare y_pred vs y_test
taus = []
# Weighted kendall tau
metric = KendallTau(lambda i: -1 / np.log2(i + 2))
qweight_sum = 0
for i in np.unique(ids_test):
# probably a better way to do this...
condition = ids_test == i
# I think argsort is all we need...
yi_test = np.argsort(y_test[condition])
yi_pred = np.argsort(y_pred[condition])
tau = metric.distance(yi_test, yi_pred)
taus.append(tau)
self.mean_tau = np.mean(taus)
self.std_tau = np.std(taus)
self.logger.info("[%s] Mean Kendalls Tau: %f" % (self.__str__(), self.mean_tau))
self.logger.info("[%s] Std Kendalls Tau: %f" % (self.__str__(), self.std_tau))
# plot importance
if self.plot_importance:
ax = self.learner.plot_importance()
ax.figure.savefig("%s/%s.pdf"%(config.FIG_DIR, self.__str__()))
return self
def go(self):
self.fit()
return self
class TaskOptimizer:
def __init__(self, learner_name, feature_name, logger,
max_evals=100, verbose=True, plot_importance=False):
self.learner_name = learner_name
self.feature_name = feature_name
self.feature = self._get_feature()
self.logger = logger
self.max_evals = max_evals
self.verbose = verbose
self.plot_importance = plot_importance
self.trial_counter = 0
self.model_param_space = ModelParamSpace(self.learner_name)
def _get_feature(self):
return Feature(self.feature_name)
def _obj(self, param_dict):
self.trial_counter += 1
param_dict = self.model_param_space._convert_int_param(param_dict)
learner = Learner(self.learner_name, param_dict)
suffix = "_[Id@%s]"%str(self.trial_counter)
self.task = Task(learner, self.feature, suffix, self.logger, self.verbose, self.plot_importance)
self.task.go()
ret = {
"loss": 1. - self.task.mean_tau,
"attachments": {
"std_tau": self.task.std_tau,
},
"status": STATUS_OK,
}
return ret
def run(self):
start = time.time()
trials = Trials()
best = fmin(self._obj, self.model_param_space._build_space(), tpe.suggest, self.max_evals, trials)
best_params = space_eval(self.model_param_space._build_space(), best)
best_params = self.model_param_space._convert_int_param(best_params)
        # hyperopt stored the losses as 1 - tau, so convert back to tau the same way
trial_mean_taus = 1 - np.asarray(trials.losses(), dtype=float)
best_ind = np.argmin(trial_mean_taus)
best_mean_tau = trial_mean_taus[best_ind]
self.logger.info("-"*50)
self.logger.info("Best Mean Kendalls Tau: %.6f" % (best_mean_tau))
self.logger.info("Best param")
self.task._print_param_dict(best_params)
end = time.time()
_sec = end - start
_min = int(_sec/60.)
self.logger.info("Time")
if _min > 0:
self.logger.info(" %d mins"%_min)
else:
self.logger.info(" %d secs"%_sec)
self.logger.info("-"*50)
#------------------------ Main -------------------------
def main(options):
logname = "[Feat@%s]_[Learner@%s]_hyperopt_%s.log"%(
options.feature_name, options.learner_name, time_utils._timestamp())
logger = logging_utils._get_logger(config.LOG_DIR, logname)
optimizer = TaskOptimizer(options.learner_name,
options.feature_name, logger, options.max_evals, verbose=True,
plot_importance=options.plot_importance)
optimizer.run()
def parse_args(parser):
parser.add_option("-f", "--feat", type="string", dest="feature_name",
help="feature name", default="basic")
parser.add_option("-l", "--learner", type="string", dest="learner_name",
help="learner name", default="reg_skl_ridge")
parser.add_option("-e", "--eval", type="int", dest="max_evals",
help="maximun number of evals for hyperopt", default=100)
parser.add_option("-p", default=False, action="store_true", dest="plot_importance",
help="plot feautre importance (currently only for xgboost)")
(options, args) = parser.parse_args()
return options, args
if __name__ == "__main__":
parser = OptionParser()
options, args = parse_args(parser)
main(options)
| mit |
jzt5132/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
IniterWorker/epitech-stats-notes | gui/gui.py | 1 | 1264 | import tkinter as tk
import matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from config import Configure
matplotlib.use("TkAgg")
class Application(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.pack()
self.create_widgets()
self.master.wm_minsize(800, 600)
def create_widgets(self):
self.hi_there = tk.Button(self)
self.hi_there["text"] = "Hello World\n(click me)"
self.hi_there["command"] = self.say_hi
self.hi_there.pack(side="top")
self.quit = tk.Button(self, text="QUIT", fg="red", command=root.destroy)
self.quit.pack(side="bottom")
f = Figure(figsize=(5, 5), dpi=100)
a = f.add_subplot(111)
a.plot([1, 2, 3, 4, 5, 6, 7, 8], [5, 6, 1, 3, 8, 9, 3, 5])
canvas = FigureCanvasTkAgg(f, self)
canvas.show()
canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
def say_hi(self):
print("hi there, everyone!")
def run_gui_mode():
root = tk.Tk()
root.geometry("800x600")
window = Application(master=root)
window.master.title("Epitech Stats")
window.mainloop()
| mit |
alexeyum/scikit-learn | examples/ensemble/plot_random_forest_regression_multioutput.py | 28 | 2642 | """
============================================================
Comparing random forests and the multi-output meta estimator
============================================================
An example to compare multi-output regression with random forest and
the :ref:`multioutput.MultiOutputRegressor <_multiclass>` meta-estimator.
This example illustrates the use of the
:ref:`multioutput.MultiOutputRegressor <_multiclass>` meta-estimator
to perform multi-output regression. A random forest regressor is used,
which supports multi-output regression natively, so the results can be
compared.
The random forest regressor will only ever predict values within the
range of observations or closer to zero for each of the targets. As a
result the predictions are biased towards the centre of the circle.
Using a single underlying feature the model learns both the
x and y coordinate as output.
"""
print(__doc__)
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(600, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y += (0.5 - rng.rand(*y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size=400,
random_state=4)
max_depth = 30
regr_multirf = MultiOutputRegressor(RandomForestRegressor(max_depth=max_depth,
random_state=0))
regr_multirf.fit(X_train, y_train)
regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)
regr_rf.fit(X_train, y_train)
# Predict on new data
y_multirf = regr_multirf.predict(X_test)
y_rf = regr_rf.predict(X_test)
# Plot the results
plt.figure()
s = 50
a = 0.4
plt.scatter(y_test[:, 0], y_test[:, 1],
c="navy", s=s, marker="s", alpha=a, label="Data")
plt.scatter(y_multirf[:, 0], y_multirf[:, 1],
c="cornflowerblue", s=s, alpha=a,
label="Multi RF score=%.2f" % regr_multirf.score(X_test, y_test))
plt.scatter(y_rf[:, 0], y_rf[:, 1],
c="c", s=s, marker="^", alpha=a,
label="RF score=%.2f" % regr_rf.score(X_test, y_test))
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Comparing random forests and the multi-output meta estimator")
plt.legend()
plt.show()
| bsd-3-clause |
Nyker510/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 72 | 13586 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and
test sets. The compressed archive is around 14 MB; once uncompressed,
the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_tfidf` function will in addition do a simple tf-idf
vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:'.)
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
    subset: 'train', 'test' or 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
    subset: 'train', 'test' or 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
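    A minimal usage sketch (shapes are indicative only)::
        bunch = fetch_20newsgroups_vectorized(subset='train')
        print(bunch.data.shape, bunch.target.shape)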
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
| bsd-3-clause |
RexFuzzle/sfepy | script/gen_gallery.py | 1 | 20332 | #!/usr/bin/env python
"""
Generate the images and rst files for gallery of SfePy examples.
The following steps need to be taken to regenerate the documentation with
the updated example files:
1. Generate the files:
- for sfepy.org deployment::
$ ./script/gen_gallery.py -l ../doc-devel
- for local test build run from ./::
$ ./script/gen_gallery.py -l doc/_build/html/
2. remove doc/examples/::
$ rm -rf doc/examples/
3. copy gallery/examples/ to doc/::
$ cp -a gallery/examples/ doc/
4. regenerate the documentation::
$ python setup.py htmldocs
Additional steps for sfepy.org deployment:
- copy doc/_build/html/ to <sfepy.org>/doc-devel/
- copy gallery/gallery.html and gallery/images/ to <sfepy.org>/
"""
import sys
sys.path.append( '.' )
import os
import tempfile
import glob
import re
from optparse import OptionParser
import matplotlib.image as image
import sfepy
from sfepy.base.base import (get_default, ordered_iteritems,
import_file, output, Struct)
from sfepy.base.ioutils import (ensure_path, locate_files, remove_files,
edit_filename)
from sfepy.postprocess.domain_specific import DomainSpecificPlot
omits = [
'vibro_acoustic3d_mid.py',
'linear_elastic_mM.py',
'time_poisson_explicit.py',
'its2D_5.py',
'linear_elastic_probes.py',
'__init__.py',
]
omit_dirs = [
re.compile('.*output.*/').match,
]
custom = {
'acoustics/acoustics3d.py' : {
'_p_1' : {
'view' : (44, 57, 0.24, [-0.004, -0.007, 0.09]),
'roll' : 0,
},
'_p_2' : {
'view' : (-99, 120, 0.4, [0.0, 0.0, 0.07]),
'roll' : 141,
},
},
'acoustics/vibro_acoustic3d.py' : {
'_p1' : {
'view' : (45.0, 54.7, 1.47, [0.0, 0.0, 0.05]),
'roll' : -120,
},
'_p2' : {
'view' : (45.0, 54.7, 1.47, [0.0, 0.0, 0.15]),
'roll' : -120,
},
'_w' : {
'view' : (0.0, 0.0, 0.86, [0.0, 0.0, 0.1]),
'roll' : 0,
},
'_g0' : {
'view' : (0.0, 0.0, 0.86, [0.0, 0.0, 0.1]),
'roll' : 0,
},
},
'diffusion/laplace_1d.py' : {
'' : {
'is_wireframe' : True,
'domain_specific' : {
't' : DomainSpecificPlot('plot_warp_scalar',
['rel_scaling=1']),
},
'view' : (-90, 90, 1.5, [0, 0, 0]),
'roll' : 0,
'opacity' : {'wireframe' : 0.3},
},
},
'diffusion/laplace_coupling_lcbcs.py' : {
'' : {
'is_wireframe' : True,
'domain_specific' : {
'u1' : DomainSpecificPlot('plot_warp_scalar',
['rel_scaling=1']),
'u2' : DomainSpecificPlot('plot_warp_scalar',
['rel_scaling=1']),
},
'view' : (-82, 50, 3.6, [-0.43, -0.55, 0.4]),
'roll' : -23,
'opacity' : {'wireframe' : 0.3},
},
},
'diffusion/poisson_iga.py' : {
'' : {
'is_wireframe' : True,
'domain_specific' : {
't' : DomainSpecificPlot('plot_warp_scalar',
['rel_scaling=1']),
},
'view' : (55, 39, 6.6, [-0.35, -0.29, 0.35]),
'roll' : 15,
'opacity' : {'wireframe' : 0.3},
},
},
'diffusion/sinbc.py' : {
'_t' : {
'is_wireframe' : True,
'domain_specific' : {
't' : DomainSpecificPlot('plot_warp_scalar',
['rel_scaling=1']),
},
'view' : (-170, 30, 4.7, [0.34, 0.23, -0.26]),
'roll' : 71,
'opacity' : {'wireframe' : 0.3},
},
'_grad' : {
'opacity' : {'surface' : 0.3},
'view' : (-170, 30, 4.7, [0.34, 0.23, -0.26]),
'roll' : 71,
},
},
'linear_elasticity/elastic_contact_planes.py' : {
'' : {
'is_wireframe' : True,
'domain_specific' : {
'u' : DomainSpecificPlot('plot_displacements',
['rel_scaling=1']),
},
'view' : (-82, 47, 3.4, [-0.5, -0.24, -0.2]),
'roll' : -8.4,
'opacity' : {'wireframe' : 0.3},
},
},
'linear_elasticity/elastic_contact_sphere.py' : {
'' : {
'is_wireframe' : True,
'domain_specific' : {
'u' : DomainSpecificPlot('plot_displacements',
['rel_scaling=1']),
},
'view' : (-82, 47, 3.4, [-0.5, -0.24, -0.2]),
'roll' : -8.4,
'opacity' : {'wireframe' : 0.3},
},
},
'linear_elasticity/elastic_shifted_periodic.py' : {
'' : {
'is_wireframe' : True,
'only_names' : ['u'],
'domain_specific' : {
'u' : DomainSpecificPlot('plot_displacements',
['rel_scaling=1',
'color_kind="scalars"',
'color_name="von_mises_stress"']),
},
'view' : (142, 39, 16, [-4.7, -2.1, -1.9]),
'roll' : 8.4,
'opacity' : {'wireframe' : 0.3},
},
},
'linear_elasticity/linear_elastic_iga.py' : {
'' : {
'is_wireframe' : True,
'domain_specific' : {
'u' : DomainSpecificPlot('plot_displacements',
['rel_scaling=1']),
},
'view' : (-37, 51, 1.5, [-0.28, -0.29, 0.0]),
'roll' : -51.5,
'opacity' : {'wireframe' : 0.2},
},
},
'navier_stokes/stokes_slip_bc.py' : {
'' : {
'view' : (-63, 52, 5.2, [-1.5, -0.65, 0.12]),
'roll' : -32,
'resolution' : (800, 600),
'layout' : 'col',
'rel_scaling' : 0.1,
},
},
'multi_physics/thermo_elasticity_ess.py' : {
'' : {
'is_wireframe' : True,
'only_names' : ['u'],
'domain_specific' : {
'u' : DomainSpecificPlot('plot_displacements',
['rel_scaling=1000',
'color_kind="scalars"',
'color_name="T"']),
},
'view' : (-51, 71, 12.9, [-2.3, -2.4, -0.2]),
'roll' : -65,
'opacity' : {'wireframe' : 0.3},
},
}
}
def _omit(filename):
omit = False
base = os.path.basename(filename)
if base in omits:
omit = True
for omit_dir in omit_dirs:
if omit_dir(filename) is not None:
omit = True
break
return omit
def _get_fig_filenames(ebase, images_dir):
fig_base = os.path.splitext(ebase)[0].replace(os.path.sep, '-')
yield fig_base
if ebase in custom:
suffixes = sorted(custom[ebase].keys())
for suffix in suffixes:
fig_filename = os.path.join(images_dir, fig_base + suffix + '.png')
yield fig_filename
else:
fig_filename = os.path.join(images_dir, fig_base + '.png')
yield fig_filename
def _get_fig_filename(ebase, images_dir, suffix):
fig_base = os.path.splitext(ebase)[0].replace(os.path.sep, '-')
fig_filename = os.path.join(images_dir, fig_base + suffix + '.png')
return fig_filename
def _make_sphinx_path(path, relative=False):
if relative:
aux = path.replace(sfepy.data_dir, '')
prefix = ('..' + os.path.sep) * aux.count(os.path.sep)
sphinx_path = prefix[:-1] + aux
else:
sphinx_path = path.replace(sfepy.data_dir, '/..')
return sphinx_path
def generate_images(images_dir, examples_dir):
"""
Generate images from results of running examples found in
    the `examples_dir` directory.
    The generated images are stored in `images_dir`.
"""
from sfepy.applications import solve_pde
from sfepy.postprocess.viewer import Viewer
from sfepy.postprocess.utils import mlab
prefix = output.prefix
output_dir = tempfile.mkdtemp()
trunk = os.path.join(output_dir, 'result')
options = Struct(output_filename_trunk=trunk,
output_format='vtk',
save_ebc=False,
save_ebc_nodes=False,
save_regions=False,
save_field_meshes=False,
save_regions_as_groups=False,
solve_not=False)
default_views = {'' : {}}
ensure_path(images_dir + os.path.sep)
view = Viewer('', offscreen=False)
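    # For each example that is not omitted: solve it with solve_pde, pick the
    # output of the last time step (if the problem is time dependent), render
    # every configured view with the sfepy Viewer and save it as a PNG whose
    # name encodes the example path and the view suffix.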
for ex_filename in locate_files('*.py', examples_dir):
if _omit(ex_filename): continue
output.level = 0
output.prefix = prefix
ebase = ex_filename.replace(examples_dir, '')[1:]
output('trying "%s"...' % ebase)
try:
problem, state = solve_pde(ex_filename, options=options)
except KeyboardInterrupt:
raise
except:
problem = None
output('***** failed! *****')
if problem is not None:
if ebase in custom:
views = custom[ebase]
else:
views = default_views
tsolver = problem.get_time_solver()
if tsolver.ts is None:
suffix = None
else:
suffix = tsolver.ts.suffix % (tsolver.ts.n_step - 1)
filename = problem.get_output_name(suffix=suffix)
for suffix, kwargs in views.iteritems():
fig_filename = _get_fig_filename(ebase, images_dir, suffix)
fname = edit_filename(filename, suffix=suffix)
output('displaying results from "%s"' % fname)
disp_name = fig_filename.replace(sfepy.data_dir, '')
output('to "%s"...' % disp_name.lstrip(os.path.sep))
view.filename = fname
view(scene=view.scene, show=False, is_scalar_bar=True, **kwargs)
view.save_image(fig_filename)
mlab.clf()
output('...done')
remove_files(output_dir)
output('...done')
def generate_thumbnails(thumbnails_dir, images_dir, scale=0.3):
"""
Generate thumbnails into `thumbnails_dir` corresponding to images in
`images_dir`.
"""
ensure_path(thumbnails_dir + os.path.sep)
output('generating thumbnails...')
filenames = glob.glob(os.path.join(images_dir, '*.png'))
for fig_filename in filenames:
ebase = fig_filename.replace(sfepy.data_dir, '').lstrip(os.path.sep)
output('"%s"' % ebase)
base = os.path.basename(fig_filename)
thumb_filename = os.path.join(thumbnails_dir, base)
image.thumbnail(fig_filename, thumb_filename, scale=scale)
output('...done')
_index = """\
.. _%s-gallery-examples-index:
%s
%s
.. toctree::
:maxdepth: 2
"""
_image = '.. image:: %s'
_include = """\
.. _%s:
%s
%s
**Description**
%s
%s
:download:`source code <%s>`
.. literalinclude:: %s
"""
def generate_rst_files(rst_dir, examples_dir, images_dir):
"""
Generate Sphinx rst files for examples in `examples_dir` with images
in `images_dir` and put them into `rst_dir`.
Returns
-------
dir_map : dict
The directory mapping of examples and corresponding rst files.
"""
ensure_path(rst_dir + os.path.sep)
output('generating rst files...')
dir_map = {}
for ex_filename in locate_files('*.py', examples_dir):
if _omit(ex_filename): continue
ebase = ex_filename.replace(examples_dir, '')[1:]
base_dir = os.path.dirname(ebase)
rst_filename = os.path.basename(ex_filename).replace('.py', '.rst')
dir_map.setdefault(base_dir, []).append((ex_filename, rst_filename))
for dirname, filenames in dir_map.iteritems():
filenames = sorted(filenames, cmp=lambda a, b: cmp(a[1], b[1]))
        dir_map[dirname] = filenames
# Main index.
mfd = open(os.path.join(rst_dir, 'index.rst'), 'w')
mfd.write(_index % ('sfepy', 'Examples', '=' * 8))
for dirname, filenames in ordered_iteritems(dir_map):
full_dirname = os.path.join(rst_dir, dirname)
ensure_path(full_dirname + os.path.sep)
# Subdirectory index.
ifd = open(os.path.join(full_dirname, 'index.rst'), 'w')
ifd.write(_index % (dirname, dirname, '=' * len(dirname)))
for ex_filename, rst_filename in filenames:
full_rst_filename = os.path.join(full_dirname, rst_filename)
output('"%s"' % full_rst_filename.replace(rst_dir, '')[1:])
rst_filename_ns = rst_filename.replace('.rst', '')
ebase = ex_filename.replace(examples_dir, '')[1:]
rst_ex_filename = _make_sphinx_path(ex_filename)
docstring = get_default(import_file(ex_filename).__doc__,
'missing description!')
ifd.write(' %s\n' % rst_filename_ns)
fig_include = ''
fig_base = _get_fig_filenames(ebase, images_dir).next()
for fig_filename in _get_fig_filenames(ebase, images_dir):
rst_fig_filename = _make_sphinx_path(fig_filename)
if os.path.exists(fig_filename):
fig_include += _image % rst_fig_filename + '\n'
# Example rst file.
fd = open(full_rst_filename, 'w')
fd.write(_include % (fig_base, ebase, '=' * len(ebase),
docstring,
fig_include,
rst_ex_filename, rst_ex_filename))
fd.close()
ifd.close()
mfd.write(' %s/index\n' % dirname)
mfd.close()
output('...done')
return dir_map
_gallery_template_file = os.path.join(sfepy.top_dir,
'doc/gallery_template.html')
_link_template = """\
<div class="figure">
<a class="reference external image-reference" href="../%s">
<img alt="%s" src="%s" />
</a>
<p class="caption">
<a class="reference internal" href="../%s"><em>%s</em></a>
</p>
</div>
<div class="toctree-wrapper compound">
</div>
"""
_side_links="<li><a class='reference internal' href='#%s'>%s</a></li>"
_div_line ="""\
<div class="section" id="%s">
<h2>%s<a class="headerlink" href="\#%s" title="Permalink to this headline">
</a></h2>
%s
<div style="clear: both"></div></div>
"""
def generate_gallery_html(examples_dir, output_filename, gallery_dir,
rst_dir, thumbnails_dir, dir_map, link_prefix):
"""
Generate the gallery html file with thumbnail images and links to
examples.
Parameters
----------
output_filename : str
The output html file name.
gallery_dir : str
The top level directory of gallery files.
rst_dir : str
The full path to rst files of examples within `gallery_dir`.
thumbnails_dir : str
The full path to thumbnail images within `gallery_dir`.
dir_map : dict
The directory mapping returned by `generate_rst_files()`
link_prefix : str, optional
The prefix to prepend to links to individual pages of examples.
"""
output('generating %s...' % output_filename)
with open(_gallery_template_file, 'r') as fd:
gallery_template = fd.read()
div_lines=[]
sidebar = []
for dirname, filenames in ordered_iteritems(dir_map):
full_dirname = os.path.join(rst_dir, dirname)
dirnamenew = dirname.replace("_"," ")
sidebarline = _side_links % (dirname, dirnamenew.title())
lines = []
for ex_filename, rst_filename in filenames:
full_rst_filename = os.path.join(full_dirname, rst_filename)
ebase = full_rst_filename.replace(rst_dir, '')[1:]
ebase = edit_filename(ebase, new_ext='.py')
link_base = full_rst_filename.replace(gallery_dir, '')[1:]
link = os.path.join(link_prefix,
os.path.splitext(link_base)[0] + '.html')
_get_fig_filenames(ebase, thumbnails_dir).next()
for thumbnail_filename in _get_fig_filenames(ebase,
thumbnails_dir):
if not os.path.isfile(thumbnail_filename):
# Skip examples with no image (= failed examples).
continue
thumbnail_name = thumbnail_filename.replace(gallery_dir,
'')[1:]
path_to_file = os.path.join(examples_dir,ebase)
docstring = get_default(import_file(path_to_file).__doc__,
'missing description!')
docstring = docstring.replace('e.g.', 'eg:')
docstring = docstring.split('.')
line = _link_template % (link,os.path.splitext(ebase)[0],
thumbnail_name,link,docstring[0]+'.')
lines.append(line)
if(len(lines)!=0):
div_lines.append(_div_line % (dirname, dirnamenew.title(),
dirname, '\n'.join(lines)))
sidebar.append(sidebarline)
fd = open(output_filename, 'w')
fd.write(gallery_template % ((link_prefix,) * 7
+ ('\n'.join(sidebar), '\n'.join(div_lines))))
fd.close()
output('...done')
usage = '%prog [options]\n' + __doc__.rstrip()
help = {
'examples_dir' :
'directory containing examples [default: %default]',
'images_dir' :
'directory where to store gallery images [default: gallery/images]',
'no_images' :
'do not (re)generate images and thumbnails',
'output_filename' :
'output file name [default: %default]',
'link_prefix' :
'prefix to be prepended to links to examples pages in gallery '
'[default: %default]',
}
def main():
parser = OptionParser(usage=usage, version='%prog')
parser.add_option('-e', '--examples-dir', metavar='directory',
action='store', dest='examples_dir',
default='examples', help=help['examples_dir'])
parser.add_option('-i', '--images-dir', metavar='directory',
action='store', dest='images_dir',
default=None, help=help['images_dir'])
parser.add_option('-n', '--no-images',
action='store_true', dest='no_images',
default=False, help=help['no_images'])
parser.add_option('-o', '--output', metavar='output_filename',
action='store', dest='output_filename',
default='gallery/gallery.html',
help=help['output_filename'])
parser.add_option('-l', '--link-prefix', metavar='prefix',
action='store', dest='link_prefix',
default='http://sfepy.org/doc-devel',
help=help['link_prefix'])
(options, args) = parser.parse_args()
examples_dir = os.path.realpath(options.examples_dir)
output_filename = os.path.realpath(options.output_filename)
gallery_dir = os.path.dirname(output_filename)
images_dir = get_default(options.images_dir,
os.path.join(gallery_dir, 'images'))
thumbnails_dir = os.path.join(images_dir, 'thumbnails')
rst_dir = os.path.join(gallery_dir, 'examples')
if not options.no_images:
generate_images(images_dir, examples_dir)
generate_thumbnails(thumbnails_dir, images_dir)
dir_map = generate_rst_files(rst_dir, examples_dir, images_dir)
generate_gallery_html(examples_dir,output_filename, gallery_dir,
rst_dir, thumbnails_dir, dir_map,
link_prefix=options.link_prefix)
if __name__ == '__main__':
main()
| bsd-3-clause |
wesm/arrow | python/pyarrow/tests/test_orc.py | 1 | 6160 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import decimal
import datetime
import pyarrow as pa
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not orc'
pytestmark = pytest.mark.orc
try:
from pandas.testing import assert_frame_equal
import pandas as pd
except ImportError:
pass
@pytest.fixture(scope="module")
def datadir(base_datadir):
return base_datadir / "orc"
def fix_example_values(actual_cols, expected_cols):
"""
Fix type of expected values (as read from JSON) according to
actual ORC datatype.
"""
for name in expected_cols:
expected = expected_cols[name]
actual = actual_cols[name]
if (name == "map" and
[d.keys() == {'key', 'value'} for m in expected for d in m]):
# convert [{'key': k, 'value': v}, ...] to [(k, v), ...]
for i, m in enumerate(expected):
expected_cols[name][i] = [(d['key'], d['value']) for d in m]
continue
typ = actual[0].__class__
if issubclass(typ, datetime.datetime):
# timestamp fields are represented as strings in JSON files
expected = pd.to_datetime(expected)
elif issubclass(typ, datetime.date):
# date fields are represented as strings in JSON files
expected = expected.dt.date
elif typ is decimal.Decimal:
converted_decimals = [None] * len(expected)
# decimal fields are represented as reals in JSON files
for i, (d, v) in enumerate(zip(actual, expected)):
if not pd.isnull(v):
exp = d.as_tuple().exponent
factor = 10 ** -exp
converted_decimals[i] = (
decimal.Decimal(round(v * factor)).scaleb(exp))
expected = pd.Series(converted_decimals)
expected_cols[name] = expected
def check_example_values(orc_df, expected_df, start=None, stop=None):
if start is not None or stop is not None:
expected_df = expected_df[start:stop].reset_index(drop=True)
assert_frame_equal(orc_df, expected_df, check_dtype=False)
def check_example_file(orc_path, expected_df, need_fix=False):
"""
    Check an ORC file against the expected data frame.
"""
from pyarrow import orc
orc_file = orc.ORCFile(orc_path)
# Exercise ORCFile.read()
table = orc_file.read()
assert isinstance(table, pa.Table)
table.validate()
    # This workaround is needed because of ARROW-3080
orc_df = pd.DataFrame(table.to_pydict())
assert set(expected_df.columns) == set(orc_df.columns)
# reorder columns if necessary
if not orc_df.columns.equals(expected_df.columns):
expected_df = expected_df.reindex(columns=orc_df.columns)
if need_fix:
fix_example_values(orc_df, expected_df)
check_example_values(orc_df, expected_df)
# Exercise ORCFile.read_stripe()
json_pos = 0
for i in range(orc_file.nstripes):
batch = orc_file.read_stripe(i)
check_example_values(pd.DataFrame(batch.to_pydict()),
expected_df,
start=json_pos,
stop=json_pos + len(batch))
json_pos += len(batch)
assert json_pos == orc_file.nrows
@pytest.mark.pandas
@pytest.mark.parametrize('filename', [
'TestOrcFile.test1.orc',
'TestOrcFile.testDate1900.orc',
'decimal.orc'
])
def test_example_using_json(filename, datadir):
"""
    Check an ORC file example against the equivalent JSON file, as given
in the Apache ORC repository (the JSON file has one JSON object per
line, corresponding to one row in the ORC file).
"""
# Read JSON file
path = datadir / filename
table = pd.read_json(str(path.with_suffix('.jsn.gz')), lines=True)
check_example_file(path, table, need_fix=True)
def test_orcfile_empty(datadir):
from pyarrow import orc
table = orc.ORCFile(datadir / "TestOrcFile.emptyFile.orc").read()
assert table.num_rows == 0
expected_schema = pa.schema([
("boolean1", pa.bool_()),
("byte1", pa.int8()),
("short1", pa.int16()),
("int1", pa.int32()),
("long1", pa.int64()),
("float1", pa.float32()),
("double1", pa.float64()),
("bytes1", pa.binary()),
("string1", pa.string()),
("middle", pa.struct(
[("list", pa.list_(
pa.struct([("int1", pa.int32()),
("string1", pa.string())])))
])),
("list", pa.list_(
pa.struct([("int1", pa.int32()),
("string1", pa.string())])
)),
("map", pa.map_(pa.string(),
pa.struct([("int1", pa.int32()),
("string1", pa.string())])
)),
])
assert table.schema == expected_schema
def test_orcfile_readwrite():
from pyarrow import orc
buffer_output_stream = pa.BufferOutputStream()
a = pa.array([1, None, 3, None])
b = pa.array([None, "Arrow", None, "ORC"])
table = pa.table({"int64": a, "utf8": b})
orc.write_table(buffer_output_stream, table)
buffer_reader = pa.BufferReader(buffer_output_stream.getvalue())
output_table = orc.ORCFile(buffer_reader).read()
assert table.equals(output_table)
| apache-2.0 |
aaren/notedown | notedown/main.py | 1 | 10523 | from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import argparse
import pkg_resources
import io
import logging
import nbformat as nbformat
from nbconvert.utils.io import unicode_std_stream
from .notedown import (MarkdownReader,
MarkdownWriter,
Knitr,
run,
strip)
try:
__version__ = pkg_resources.require('notedown')[0].version
except pkg_resources.DistributionNotFound:
__version__ = 'testing'
markdown_template \
= pkg_resources.resource_filename('notedown',
'templates/markdown.tpl')
markdown_figure_template \
= pkg_resources.resource_filename('notedown',
'templates/markdown_outputs.tpl')
examples = """
Example usage of notedown
-------------------------
Convert markdown into notebook:
notedown input.md > output.ipynb
notedown input.md --output output.ipynb
Convert a notebook into markdown, with outputs intact:
notedown input.ipynb --from notebook --to markdown > output_with_outputs.md
Convert a notebook into markdown, stripping all outputs:
notedown input.ipynb --from notebook --to markdown --strip > output.md
Strip the output cells from markdown:
notedown with_output_cells.md --to markdown --strip > no_output_cells.md
Convert from markdown and execute:
notedown input.md --run > executed_notebook.ipynb
Convert r-markdown into markdown:
notedown input.Rmd --to markdown --knit > output.md
Convert r-markdown into an IPython notebook:
notedown input.Rmd --knit > output.ipynb
Convert r-markdown into a notebook with the outputs computed, using
the rmagic extension to execute the code blocks:
notedown input.Rmd --knit --rmagic --run > executed_output.ipynb
"""
def convert(content, informat, outformat, strip_outputs=False):
if os.path.exists(content):
with io.open(content, 'r', encoding='utf-8') as f:
contents = f.read()
else:
contents = content
readers = {'notebook': nbformat,
'markdown': MarkdownReader(precode='',
magic=False,
match='fenced')
}
writers = {'notebook': nbformat,
'markdown': MarkdownWriter(markdown_template,
strip_outputs=strip_outputs)
}
reader = readers[informat]
writer = writers[outformat]
notebook = reader.reads(contents, as_version=4)
return writer.writes(notebook)
def ftdetect(filename):
"""Determine if filename is markdown or notebook,
based on the file extension.
"""
_, extension = os.path.splitext(filename)
md_exts = ['.md', '.markdown', '.mkd', '.mdown', '.mkdn', '.Rmd']
nb_exts = ['.ipynb']
if extension in md_exts:
return 'markdown'
elif extension in nb_exts:
return 'notebook'
else:
return None
def command_line_parser():
"""Create parser for command line usage."""
description = "Create an IPython notebook from markdown."
example_use = "Example: notedown some_markdown.md > new_notebook.ipynb"
parser = argparse.ArgumentParser(description=description,
epilog=example_use)
parser.add_argument('input_file',
help="markdown input file (default STDIN)",
nargs="?",
default='-')
parser.add_argument('-o', '--output',
help=("output file, (default STDOUT). "
"If flag used but no file given, use "
"the name of the input file to "
"determine the output filename. "
"This will OVERWRITE if input and output "
"formats are the same."),
nargs="?",
default='-',
const='')
parser.add_argument('--from',
dest='informat',
choices=('notebook', 'markdown'),
help=("format to convert from, defaults to markdown "
"or file extension"))
parser.add_argument('--to',
dest='outformat',
choices=('notebook', 'markdown'),
help=("format to convert to, defaults to notebook "
"or file extension. Setting --render forces "
"this to 'markdown'"))
parser.add_argument('--run', '--execute',
action='store_true',
help=("run the notebook, executing the "
"contents of each cell"))
parser.add_argument('--timeout',
default=30,
type=int,
help=("set the cell execution timeout (in seconds)"))
parser.add_argument('--strip',
action='store_true',
dest='strip_outputs',
help=("strip output cells"))
parser.add_argument('--precode',
nargs='+',
default=[],
help=("additional code to place at the start of the "
"notebook, e.g. --pre '%%matplotlib inline' "
"'import numpy as np'"))
parser.add_argument('--knit',
nargs='?',
help=("pre-process the markdown with knitr. "
"Default chunk options are 'eval=FALSE' "
"but you can change this by passing a string. "
"Requires R in your path and knitr installed."),
const='eval=FALSE')
parser.add_argument('--rmagic',
action='store_true',
help=("autoload the rmagic extension. Synonym for "
"--precode '%%load_ext rpy2.ipython'"))
parser.add_argument('--nomagic',
action='store_false',
dest='magic',
help=("disable code magic."))
parser.add_argument('--render',
help=('render outputs, forcing markdown output'),
action='store_true')
parser.add_argument('--template',
help=('template file'))
parser.add_argument('--match',
default='all',
help=("determine kind of code blocks that get "
"converted into code cells. "
"choose from 'all' (default), 'fenced', "
"'strict' or a specific language to match on"))
parser.add_argument('--examples',
help=('show example usage'),
action='store_true')
parser.add_argument('--version',
help=('print version number'),
action='store_true')
parser.add_argument('--debug',
help=('show logging output'),
action='store_true')
return parser
def main(args, help=''):
if args.debug:
logging.basicConfig(level=logging.DEBUG)
if args.version:
print(__version__)
sys.exit()
if args.examples:
print(examples)
sys.exit()
# if no stdin and no input file
if args.input_file == '-' and sys.stdin.isatty():
sys.stdout.write(help)
sys.exit()
elif args.input_file == '-':
input_file = sys.stdin
elif args.input_file != '-':
input_file = io.open(args.input_file, 'r', encoding='utf-8')
else:
sys.exit('malformed input')
# pre-process markdown by using knitr on it
if args.knit:
knitr = Knitr()
input_file = knitr.knit(input_file, opts_chunk=args.knit)
if args.rmagic:
args.precode.append(r"%load_ext rpy2.ipython")
if args.render:
template_file = markdown_figure_template
else:
template_file = markdown_template
template_file = args.template or template_file
# reader and writer classes with args and kwargs to
# instantiate with
readers = {'notebook': nbformat,
'markdown': MarkdownReader(precode='\n'.join(args.precode),
magic=args.magic,
match=args.match,
caption_comments=args.render)
}
writers = {'notebook': nbformat,
'markdown': MarkdownWriter(template_file,
strip_outputs=args.strip_outputs)
}
informat = args.informat or ftdetect(input_file.name) or 'markdown'
outformat = args.outformat or ftdetect(args.output) or 'notebook'
if args.render:
outformat = 'markdown'
reader = readers[informat]
writer = writers[outformat]
with input_file as ip:
notebook = reader.read(ip, as_version=4)
if args.run:
run(notebook, timeout=args.timeout)
if args.strip_outputs:
strip(notebook)
output_ext = {'markdown': '.md',
'notebook': '.ipynb'}
if not args.output and args.input_file != '-':
# overwrite
fout = os.path.splitext(args.input_file)[0] + output_ext[outformat]
# grab the output here so we don't obliterate the file if
# there is an error
output = writer.writes(notebook)
with io.open(fout, 'w', encoding='utf-8') as op:
op.write(output)
elif not args.output and args.input_file == '-':
# overwrite error (input is stdin)
sys.exit('Cannot overwrite with no input file given.')
elif args.output == '-':
# write stdout
writer.write(notebook, unicode_std_stream('stdout'))
elif args.output != '-':
# write to filename
with io.open(args.output, 'w', encoding='utf-8') as op:
writer.write(notebook, op)
def app():
parser = command_line_parser()
args = parser.parse_args()
main(args, help=parser.format_help())
if __name__ == '__main__':
app()
| bsd-2-clause |
kvr777/deep-learning | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
"""
Display histogram of a TF distribution
"""
with tf.Session() as sess:
values = sess.run(distribution_tensor)
plt.title(title)
    plt.hist(values, np.linspace(*hist_range, num=len(values) // 2))
plt.show()
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
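    # The example network: two fully-connected hidden layers (256 and 128
    # ReLU units) followed by a linear output layer. Only the provided
    # `weights` differ between runs, so differences in the loss curves
    # reflect the weight initialization alone (all biases start at zero).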
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Measurements use for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
assert len(weight_init_list) <= len(colors), 'Too many inital weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
| mit |
OpenDroneMap/OpenDroneMap | opendm/dem/ground_rectification/rectify.py | 2 | 7435 | import argparse
import numpy as np
from os import path
from sklearn.neighbors import BallTree
from sklearn.linear_model import RANSACRegressor
from .extra_dimensions.distance_dimension import DistanceDimension
from .extra_dimensions.partition_dimension import PartitionDimension
from .extra_dimensions.extended_dimension import ExtendedDimension
from .grid.builder import build_grid
from .bounds.utils import calculate_convex_hull_bounds
from .io.las_io import read_cloud, write_cloud
from .partition.selector import select_partition_plan
from .point_cloud import PointCloud
EPSILON = 0.00001
def run_rectification(**kwargs):
header, point_cloud = read_cloud(kwargs['input'])
if 'reclassify_plan' in kwargs and kwargs['reclassify_plan'] is not None:
point_cloud = reclassify_cloud(point_cloud, kwargs['reclassify_plan'], kwargs['reclassify_threshold'], kwargs['min_points'], kwargs['min_area'])
if 'extend_plan' in kwargs and kwargs['extend_plan'] is not None:
point_cloud = extend_cloud(point_cloud, kwargs['extend_plan'], kwargs['extend_grid_distance'], kwargs['min_points'], kwargs['min_area'])
write_cloud(header, point_cloud, kwargs['output'], kwargs['debug'])
def reclassify_cloud(point_cloud, plan, threshold, min_points, min_area):
# Get only ground
ground_cloud = point_cloud[point_cloud.classification == 2]
# Get the partition plan, according to the specified criteria
partition_plan = select_partition_plan(plan, ground_cloud)
# Execute the partition plan, and get all the partitions
partitions = [result for result in partition_plan.execute(min_points=min_points, min_area=min_area)]
# Add 'distance to ground' and 'partition number' dimensions to the cloud
for dimension in [DistanceDimension(), PartitionDimension('reclassify_partition')]:
# Calculate new dimension for partition
for partition in partitions:
dimension.assign(partition.point_cloud)
# Update new data to the original point cloud
point_cloud.update(partition.point_cloud)
# Calculate the points that need to be reclassified
mask = point_cloud.get_extra_dimension_values('distance_to_ground') > threshold
# Reclassify them as 'unclassified'
point_cloud.classification[mask] = 1
return point_cloud
def extend_cloud(point_cloud, plan, distance, min_points, min_area):
# Get only ground
ground_cloud = point_cloud[point_cloud.classification == 2]
# Read the bounds file
bounds = calculate_convex_hull_bounds(ground_cloud.get_xy())
# Generate a grid of 2D points inside the bounds, with a distance of 'distance' between them
grid_2d = build_grid(bounds, ground_cloud, distance)
# Create a new point cloud
grid_3d = PointCloud.with_xy(grid_2d)
# Get the partition plan, according to the specified criteria
partition_plan = select_partition_plan(plan, ground_cloud)
# Execute the partition plan, and get all the partitions
partitions = partition_plan.execute(distance=distance, min_points=min_points, min_area=min_area, bounds=bounds)
# Create dimensions
partition_dimension = PartitionDimension('extend_partition')
extended_dimension = ExtendedDimension()
for partition in partitions:
# Keep the grid point that are inside the partition
grid_inside = partition.bounds.keep_points_inside(grid_3d)
if grid_inside.len() > 0:
# In each partition, calculate the altitude of the grid points
new_points = __calculate_new_points(grid_inside, partition.point_cloud)
# Assign the dimension values
partition_dimension.assign(new_points, partition.point_cloud)
extended_dimension.assign(new_points)
# Update the original 3d grid with the new calculated points
grid_3d.update(new_points)
else:
# Assign the original points the correct partition
partition_dimension.assign(partition.point_cloud)
# Update new information to the original point cloud
point_cloud.update(partition.point_cloud)
# Calculate the bounding box of the original cloud
bbox = point_cloud.get_bounding_box()
# Remove points that might have ended up outside the bbox
grid_3d = bbox.keep_points_inside(grid_3d)
# Add the new grid points to the original cloud
point_cloud.concatenate(grid_3d)
# Add the new points to the original point cloud
return point_cloud
def __calculate_new_points(grid_points_inside, partition_point_cloud):
# Calculate RANSCAC model
model = RANSACRegressor().fit(partition_point_cloud.get_xy(), partition_point_cloud.get_z())
# With the ransac model, calculate the altitude for each grid point
grid_points_altitude = model.predict(grid_points_inside.get_xy())
# Calculate color for new points
[avg_red, avg_green, avg_blue] = np.mean(partition_point_cloud.rgb, axis=0)
red = np.full(grid_points_inside.len(), avg_red)
green = np.full(grid_points_inside.len(), avg_green)
blue = np.full(grid_points_inside.len(), avg_blue)
# Classify all new points as ground
classification = np.full(grid_points_inside.len(), 2, dtype=np.uint8)
# Split xy into columns
[x, y] = np.hsplit(grid_points_inside.get_xy(), 2)
# Return point cloud
return PointCloud.with_dimensions(x.ravel(), y.ravel(), grid_points_altitude, classification, red, green, blue, grid_points_inside.indices)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='This script takes a pre-classified point cloud, re-classifies wrongly classified ground points as non-ground points and finally adds ground points where needed.')
parser.add_argument('input', type=str, help='The path where to find the pre-classified point cloud.')
parser.add_argument('output', type=str, help='The path where to save the rectified point cloud.')
    parser.add_argument('--reclassify_plan', type=str, help='The partition plan to use for reclassification. Must be one of (one, uniform, median, surrounding)')
parser.add_argument('--reclassify_threshold', type=float, help='Every point with a distance to the estimated ground that is higher than the threshold will be reclassified as non ground', default=5)
    parser.add_argument('--extend_plan', type=str, help='The partition plan to use for extending the ground. Must be one of (one, uniform, median, surrounding)')
parser.add_argument('--extend_grid_distance', type=float, help='The distance between points on the grid that will be added to the point cloud.', default=5)
parser.add_argument('--min_area', type=int, help='Some partition plans need a minimum area as a stopping criteria.', default=750)
parser.add_argument('--min_points', type=int, help='Some partition plans need a minimum number of points as a stopping criteria.', default=500)
args = parser.parse_args()
if args.reclassify_plan is None and args.extend_plan is None:
raise Exception("Please set a reclassifying or extension plan. Otherwise there is nothing for me to do.")
    run_rectification(input=args.input, reclassify_plan=args.reclassify_plan, reclassify_threshold=args.reclassify_threshold, \
        extend_plan=args.extend_plan, extend_grid_distance=args.extend_grid_distance, \
        output=args.output, min_points=args.min_points, min_area=args.min_area, debug=False)
| gpl-3.0 |
kjung/scikit-learn | examples/svm/plot_svm_scale_c.py | 44 | 5405 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
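One rough way to make this explicit (a sketch using the notation above) is
to divide the objective by :math:`C n`:
.. math::
    \frac{1}{n} \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \frac{1}{C n} \Omega (w)
so the balance between data fit and penalty is preserved as `n` grows only
if :math:`C n` is kept constant, i.e. if `C` is scaled like `1 / n`; whether
this scaling is actually appropriate depends on the penalty, as discussed
below.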
When using, for example, :ref:`cross validation <cross_validation>` to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
    Two separate datasets are used for the two different plots. The reason
    behind this is that the `l1` case works better on sparse data, while `l2`
    is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['navy', 'cyan', 'darkorange']
lw = 2
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(train_size=train_size, n_iter=250,
random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size, color=colors[k], lw=lw)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/matplotlib/tests/test_basic.py | 5 | 1550 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
from nose.tools import assert_equal
from matplotlib.cbook import MatplotlibDeprecationWarning
from matplotlib.testing.decorators import knownfailureif
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'The finance module has been deprecated in mpl 2',
MatplotlibDeprecationWarning)
from pylab import *
def test_simple():
assert_equal(1 + 1, 2)
@knownfailureif(True)
def test_simple_knownfail():
# Test the known fail mechanism.
assert_equal(1 + 1, 3)
def test_override_builtins():
ok_to_override = set([
'__name__',
'__doc__',
'__package__',
'__loader__',
'__spec__',
'any',
'all',
'sum'
])
# We could use six.moves.builtins here, but that seems
# to do a little more than just this.
if six.PY3:
builtins = sys.modules['builtins']
else:
builtins = sys.modules['__builtin__']
overridden = False
for key in globals().keys():
if key in dir(builtins):
if (globals()[key] != getattr(builtins, key) and
key not in ok_to_override):
print("'%s' was overridden in globals()." % key)
overridden = True
assert not overridden
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
Aircollition/Aircollition | Python scripts/Script_10_SplittingvcMC.py | 1 | 3115 | import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
from scipy.stats import mvn
from LaTeXPy import latexify
def func(U, epsilon):
    # Indicator of the event that any U_i is less than epsilon
ind = np.any(U < epsilon, axis = 1)
return ind
def quant(X, alpha):
G = np.sort(X)
size = G.size
index = int(size * alpha)
return G[index]
def phi(X):
out = np.min(X, axis = 1)
return out
epsilon = 0.1 # Collision distance
Nsim = 10**5 # number of Monte Carlo simulations
npoint = 20 # number of points in the trajectory
Time = 100.0
dmax = 8
v=500.0/60.0 # airplane speed
rc=1.0/57 # param
sigmac=1.0 # param
t = np.linspace(0, Time, npoint);
cov = np.zeros((npoint,npoint), dtype = float)
for i in range(npoint):
for j in range(npoint):
cov[i,j] = 2 * sigmac**2 * (1-np.exp(-2*rc*v*min(t[i],t[j])/sigmac)) * np.exp(-rc*v*np.abs(t[i]-t[j])/sigmac)
A = []
B = []
for distance in np.linspace(0, dmax, 20):
mean = distance * np.ones((npoint,), dtype = float)
    # End of the definition of the process
    # Simulation of the Gaussian vectors
X = np.random.multivariate_normal(mean, cov, size=Nsim)
# Monte Carlo method to calculate the probability
ind_mc = func(X, epsilon)
p_emp_MC = np.mean(ind_mc)
erreur_MC = 1.96*np.sqrt(p_emp_MC*(1-p_emp_MC)/Nsim)
# Splitting Method
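    # Idea of the splitting estimator: instead of hitting the rare event
    # {phi(X) < S} directly, intermediate thresholds are set adaptively as
    # empirical alpha-quantiles of phi(X). At each level, the particles that
    # are already below the current threshold are resampled and then perturbed
    # with an autoregressive Gaussian move (rho * Y + sqrt(1 - rho**2) * noise);
    # a move is kept only if it still satisfies the current threshold. The
    # final estimate is the product of the per-level fractions with the
    # fraction of particles below S at the last level.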
# Quantile function
N = 10**5
    alpha = 0.5 # quantile level for the adaptive thresholds
    X = np.random.multivariate_normal(mean, cov, size=N)
    rho = 0.5 # parameter of the Markovian kernel
    nu = np.sqrt(1-rho**2)
    S = 0.1 # final threshold (the event of interest is phi(X) < S)
q_alpha = quant(phi(X), alpha) # Estimation of quantile
eye = np.eye(npoint) # auxiliary
i = 0
while(q_alpha > S):
w = (phi(X)<q_alpha) # weights for resampling
while(np.sum(w)==0):
X = np.random.multivariate_normal(mean, cov, size=N)
w = (phi(X)<q_alpha)
w = w /np.sum(w)
ind = npr.choice(np.arange(N), size = N, replace = True, p = w) # resampling
Y = X[ind] # resampling
p = rho*Y+nu*np.random.multivariate_normal(mean, eye, size=N) # Markovian kernel application
aux1 = (p.T*(phi(p)<q_alpha)).T
aux2 = (Y.T*(phi(p)>=q_alpha)).T
X = aux1 + aux2 # new population
q_alpha = quant(phi(X), alpha) # position of the next threshold
i=i+1
proba = (1-alpha)**i * np.mean(phi(X)<S) # probability estimation with splitting
A.append(p_emp_MC)
B.append(proba)
low = epsilon * np.ones(npoint)
upp = 100 * np.ones(npoint)
P = []
for distance in np.linspace(0,8,100):
mean = distance * np.ones(npoint)
p,i = mvn.mvnun(low,upp,mean,cov)
P.append(1-p)
latexify()
plt.figure()
plt.grid(True)
plt.semilogy(np.linspace(0, dmax, 20), A, 'rx', label = 'MC')
plt.semilogy(np.linspace(0, dmax, 20), B, 'b.', label ='Splitting')
plt.semilogy(np.linspace(0, 8, 100), P, 'k', label ='num')
plt.xlabel("Separation distance")
plt.ylabel("Probability")
plt.legend()
plt.savefig('Outputs/Script_10_SplittingvsMC.pdf', bbox_inches='tight') | mit |
nesterione/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps base on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
    It implements a variant of Random Kitchen Sinks [1].
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
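    Examples
    --------
    A minimal sketch on toy data (parameter values are illustrative only):
    >>> import numpy as np
    >>> from sklearn.kernel_approximation import RBFSampler
    >>> X = np.random.RandomState(0).rand(10, 2)
    >>> rbf_feature = RBFSampler(gamma=1.0, n_components=50, random_state=1)
    >>> X_features = rbf_feature.fit_transform(X)
    >>> X_features.shape
    (10, 50)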
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
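# Illustrative usage of RBFSampler (a sketch added for exposition; the toy data
# and parameter values below are made up and are not part of the original
# module):
def _example_rbf_sampler():
    rng = np.random.RandomState(0)
    X = rng.rand(10, 4)
    sampler = RBFSampler(gamma=0.5, n_components=50, random_state=0)
    X_features = sampler.fit_transform(X)
    # Inner products of rows of X_features approximate the RBF kernel values
    # exp(-gamma * ||x - y||^2) between the corresponding rows of X.
    return X_features.shape  # (10, 50)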
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
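# Illustrative usage of SkewedChi2Sampler (a sketch added for exposition; the
# toy data below is made up). The input must not contain negative entries.
def _example_skewed_chi2_sampler():
    rng = np.random.RandomState(0)
    X = rng.rand(10, 4)  # non-negative, histogram-like features
    sampler = SkewedChi2Sampler(skewedness=0.5, n_components=30, random_state=0)
    X_features = sampler.fit_transform(X)
    return X_features.shape  # (10, 30)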
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
    Uses sampling of the Fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or a sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
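# Illustrative usage of AdditiveChi2Sampler (a sketch added for exposition; the
# toy data below is made up). With sample_steps=2, each of the 4 input features
# expands to 2*sample_steps - 1 = 3 output features.
def _example_additive_chi2_sampler():
    rng = np.random.RandomState(0)
    X = rng.rand(10, 4)  # entries must be non-negative
    sampler = AdditiveChi2Sampler(sample_steps=2)
    X_new = sampler.fit_transform(X)
    return X_new.shape  # (10, 12)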
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
        X : array-like, shape=(n_samples, n_features)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
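# Illustrative usage of Nystroem (a sketch added for exposition; the toy data
# below is made up): approximate an RBF kernel map from 20 sampled points.
def _example_nystroem():
    rng = np.random.RandomState(0)
    X = rng.rand(50, 5)
    feature_map = Nystroem(kernel="rbf", gamma=0.2, n_components=20,
                           random_state=0)
    X_transformed = feature_map.fit_transform(X)
    # X_transformed.dot(X_transformed.T) approximates the exact RBF kernel
    # matrix evaluated on X.
    return X_transformed.shape  # (50, 20)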
| bsd-3-clause |
adelomana/mendiak | saturationCurveAnalysis.py | 1 | 5371 | '''
this script creates the FASTQ files for the saturation curve
'''
import os,sys,random,numpy
import scipy,scipy.interpolate
import matplotlib,matplotlib.pyplot
def butcher(sample,iteration):
'''
this function creates crumbles of FASTQ files for curve saturation analysis
'''
# f.0. read reads (NPI)
possibleFiles=os.listdir(readsFilesDir)
inputFile=readsFilesDir+[element for element in possibleFiles if correspondance[sample] in element][0]
allReads={}
with open(inputFile,'r') as f:
count=0
for line in f:
vector=line[:-2]
count=count+1
if count == 1:
readName=vector
if count == 2:
sequence=vector
if count == 4:
quality=vector
allReads[readName]=[sequence,quality]
count=0
# f.2. go over resolution decrements, saving FASTQ files of appropriate size
allKeys=list(allReads.keys())
numberOfReads=len(allKeys)
mr=int(numberOfReads/1e6)
while mr >= 1:
print('selecting %s mr...'%mr)
# select n random reads
k=int(mr*1e6)
selectedReads=random.sample(allKeys,k)
# write a file
outputFile=piecesDir+sample+'.resolution.%s.iteration.%s.fastq'%(str(mr),str(iteration))
g=open(outputFile,'w')
for read in selectedReads:
g.write('%s\n'%read)
g.write('%s\n'%allReads[read][0])
g.write('+\n')
g.write('%s\n'%allReads[read][1])
g.close()
# next iteration
mr=mr-resolution
return None
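# Note added for exposition: the reader in butcher() assumes standard 4-line
# FASTQ records and strips the trailing end-of-line characters, so a
# hypothetical record such as
#
#   @READ_ID
#   ACGTACGTACGT
#   +
#   IIIIIIIIIIII
#
# is stored in allReads as {'@READ_ID': ['ACGTACGTACGT', 'IIIIIIIIIIII']}.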
# 0. user defined variables
readsFilesDir='/Volumes/omics4tb/alomana/projects/csp.jgi/data/fastq/clean/'
piecesDir='/Volumes/omics4tb/alomana/projects/csp.jgi/data/fastq.crumbles.n5/'
peaksDir='/Volumes/omics4tb/alomana/projects/csp.jgi/data/macs2.crumbles.n5/'
figuresDir='/Users/alomana/gDrive2/tmp/'
resolution=2
iterations=5
correspondance={}
correspondance['0hA']='ASCAO'
correspondance['0hB']='ASCAP'
correspondance['24hA']='ASCAS'
correspondance['24hB']='ASCAT'
correspondance['48hA']='ASCAU'
correspondance['48hB']='ASCAW'
finalSamples={}
finalSamples['0hA']=[33.25,30028]
finalSamples['0hB']=[10.67,9134]
finalSamples['24hA']=[21.15,35467]
finalSamples['24hB']=[15.2, 32323]
finalSamples['48hA']=[19.24,13712]
finalSamples['48hB']=[33.73,18778]
'''
# 1. iterate over samples
for sample in correspondance.keys():
# 2. create FASTQ crumbles
print('working with sample'+sample+'...')
for iteration in range(iterations):
iteration=iteration
print('working with iteration %s...'%str(iteration))
butcher(sample,iteration)
'''
# 3. figure maker
allFiles=os.listdir(peaksDir)
peakFiles=[element for element in allFiles if "_peaks.xls" in element]
# 3.1. reading data
print('reading peak data...')
saturationValues={}
for peakFileName in peakFiles:
pieces=peakFileName.split('_peaks')[0].split('.')
sampleName=pieces[1]
mr=int(pieces[3])
if sampleName not in saturationValues.keys():
saturationValues[sampleName]={}
if mr not in saturationValues[sampleName].keys():
saturationValues[sampleName][mr]=[]
peakFile=peaksDir+peakFileName
with open(peakFile,'r') as f:
lastLine=f.readlines()[-1]
vector=lastLine.split('\t')
peaksTextForm=vector[-1].split('_')[-1]
peaksTextForm=peaksTextForm.replace('\n','')
if peaksTextForm[0] == '#':
formatted=0
elif peaksTextForm[-1].isdigit() == False:
formatted=int(peaksTextForm[:-1])
else:
formatted=int(peaksTextForm)
saturationValues[sampleName][mr].append(formatted)
# 3.2. building graph
print('creating figure...')
colorSchema={}
colorSchema['0hA']='darkgreen'
colorSchema['0hB']='green'
colorSchema['24hA']='darkred'
colorSchema['24hB']='red'
colorSchema['48hA']='blue'
colorSchema['48hB']='darkblue'
allSamples=list(saturationValues.keys())
allSamples.sort()
for sample in allSamples:
x=list(saturationValues[sample].keys())
x.sort()
y=[];err=[]
for element in x:
y.append(numpy.mean(saturationValues[sample][element]))
err.append(numpy.std(saturationValues[sample][element]))
# setting up the color
theColor=colorSchema[sample]
# plotting the rest of the trajectory
matplotlib.pyplot.errorbar(x,y,yerr=err,fmt='o',color=theColor,alpha=0.5,mew=0.)
# adding final sample
finalx=finalSamples[sample][0]
finaly=finalSamples[sample][1]
matplotlib.pyplot.plot(finalx,finaly,'s',color=theColor,mew=0.)
# interpolating lines
interpolation=scipy.interpolate.PchipInterpolator(x,y)
xnew=numpy.linspace(min(x),max(x),num=500,endpoint=True)
ynew=interpolation(xnew)
matplotlib.pyplot.plot(xnew,ynew,'-',lw=2,color=theColor,label=sample)
matplotlib.pyplot.xlim([0,35])
matplotlib.pyplot.ylim([-1000,40000])
matplotlib.pyplot.xlabel('sequencing depth (mr)',fontsize=28)
matplotlib.pyplot.ylabel('peaks (x1e3)',fontsize=28)
matplotlib.pyplot.legend(loc=2)
matplotlib.pyplot.yticks([5000,10000,15000,20000,25000,30000,35000,40000],['5','10','15','20','25','30','35','40'],fontsize=20)
matplotlib.pyplot.xticks(fontsize=20)
matplotlib.pyplot.tight_layout()
matplotlib.pyplot.savefig(figuresDir+'saturationCurve.png')
| gpl-3.0 |
dmargala/blupe | python/plot_fit_tpcorr_results.py | 1 | 5943 | #!/usr/bin/env python
import argparse
import glob
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
mpl.rcParams.update({'font.size': 10})
import matplotlib.pyplot as plt
def mad(arr):
"""
Median Absolute Deviation: a "Robust" version of standard deviation.
    Indicates the variability of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation
"""
arr = np.ma.array(arr).compressed() # should be faster to not use masked arrays.
med = np.median(arr)
return np.median(np.abs(arr - med))
def nmad(arr):
return 1.4826*mad(arr)
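# Note added for exposition: the 1.4826 factor above rescales the median
# absolute deviation so that, for normally distributed data, nmad is a
# consistent estimator of the standard deviation (1.4826 is approximately
# 1 / Phi^-1(3/4), the reciprocal of the 0.75 quantile of the standard normal).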
def add_stat_legend(x):
textstr = ''
textstr += '$\mathrm{N}=%d$\n' % len(x)
textstr += '$\mathrm{mean}=%.2f$\n' % np.nanmean(x)
textstr += '$\mathrm{median}=%.2f$\n' % np.nanmedian(x)
textstr += '$\mathrm{nmad}=%.2f$' % nmad(x)
props = dict(boxstyle='round', facecolor='white')
plt.text(0.95, 0.95, textstr, transform=plt.gca().transAxes,
va='top', ha='right', bbox=props)
def is_outlier(points, thresh=3.5):
"""
Returns a boolean array with True if points are outliers and False
otherwise.
Parameters:
-----------
points : An numobservations by numdimensions array of observations
thresh : The modified z-score to use as a threshold. Observations with
a modified z-score (based on the median absolute deviation) greater
than this value will be classified as outliers.
Returns:
--------
mask : A numobservations-length boolean array.
References:
----------
Boris Iglewicz and David Hoaglin (1993), "Volume 16: How to Detect and
Handle Outliers", The ASQC Basic References in Quality Control:
Statistical Techniques, Edward F. Mykytka, Ph.D., Editor.
"""
if len(points.shape) == 1:
points = points[:,None]
median = np.median(points, axis=0)
diff = np.sum((points - median)**2, axis=-1)
diff = np.sqrt(diff)
med_abs_deviation = np.median(diff)
modified_z_score = 0.6745 * diff / med_abs_deviation
return modified_z_score > thresh
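# Illustrative usage of is_outlier (a sketch added for exposition; the data
# below is made up):
def _example_is_outlier():
    points = np.array([1.0, 1.1, 0.9, 1.05, 10.0])
    mask = is_outlier(points, thresh=3.5)
    # mask flags only the 10.0 entry; the remaining points are kept.
    return points[~mask]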
def main():
# parse command-line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--verbose", action="store_true",
help="print verbose output")
parser.add_argument("-o", "--output", type=str, default=None,
help="output file")
parser.add_argument("-i", "--input", type=str, default=None,
help="required input file")
parser.add_argument("--mask-outliers", action="store_true",
help="mask outliers in samples")
args = parser.parse_args()
filenames = glob.glob(args.input)
nfiles = len(filenames)
nparams = 3
summary = np.empty((nfiles, nparams))
for i,filename in enumerate(filenames):
# data columns:
# x0 a1 a2
data = np.loadtxt(filename, ndmin=2)
summary[i] = np.median(data, axis=0)
outlier_thres = 20
# save results summary plot
fig = plt.figure(figsize=(12,12))
ax1 = plt.subplot2grid((3,3), (0,0))
ax2 = plt.subplot2grid((3,3), (1,1))
ax3 = plt.subplot2grid((3,3), (2,2))
ax4 = plt.subplot2grid((3,3), (1,0))
ax5 = plt.subplot2grid((3,3), (2,0))
ax6 = plt.subplot2grid((3,3), (2,1))
# plot the fit parameter distributions
def plot_param_dist(x, binspec, color, xlabel):
xmin, xmax, nxbins = [element for tupl in binspec for element in tupl]
print 'Stats for %s' % xlabel
print 'min, max: %.4g, %.4g' % (xmin, xmax)
if args.mask_outliers:
mask = ~is_outlier(x, thresh=outlier_thres)
print 'number of masked entries: %d' % len(np.flatnonzero(~mask))
print 'list of masked entries and values: '
for index in np.flatnonzero(~mask):
filename = filenames[index]
print '-'.join(filename.split('.')[0].split('-')[-2:]), x[index]
x = x[mask]
print 'min, max (masked): %f, %f' % (np.min(x), np.max(x))
print
plt.hist(x, bins=np.linspace(xmin, xmax, nxbins+1), facecolor=color, alpha=.5, histtype='stepfilled')
plt.xlabel(xlabel)
plt.xlim([xmin, xmax])
plt.grid()
add_stat_legend(x)
plimits = ((3000, 6500), (-.3,3.2), (-.8,.6))
pbins = ((50,), (50,), (50,))
plt.sca(ax1)
plot_param_dist(summary[:,0], (plimits[0],pbins[0]), 'blue', r'$x_0$')
plt.sca(ax2)
plot_param_dist(summary[:,1], (plimits[1],pbins[1]), 'green', r'$a_1$')
plt.sca(ax3)
plot_param_dist(summary[:,2], (plimits[2],pbins[2]), 'red', r'$a_2$')
# plot the fit parameter distributions
def plot_param_scatter(x, y, xlim, ylim, xlabel, ylabel):
if args.mask_outliers:
mask = ~is_outlier(x, thresh=outlier_thres) & ~is_outlier(y, thresh=outlier_thres)
x = x[mask]
y = y[mask]
plt.plot(x, y, '+', ms=5)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.xlim(xlim)
plt.ylim(ylim)
plt.grid()
# calculate correlation coefficient
corr = np.corrcoef(x, y)
rho = corr[0,1]
# add text box
textstr = ''
textstr += '$\mathrm{N}=%d$\n' % len(x)
textstr += r'$\rho=%.2f$' % rho
props = dict(boxstyle='round', facecolor='white')
plt.text(0.95, 0.95, textstr, transform=plt.gca().transAxes, va='top', ha='right', bbox=props)
plt.sca(ax4)
plot_param_scatter(summary[:,0], summary[:,1], plimits[0], plimits[1], r'$x_0$', r'$a_1$')
plt.sca(ax5)
plot_param_scatter(summary[:,0], summary[:,2], plimits[0], plimits[2], r'$x_0$', r'$a_2$')
plt.sca(ax6)
plot_param_scatter(summary[:,1], summary[:,2], plimits[1], plimits[2], r'$a_1$', r'$a_2$')
plt.tight_layout()
fig.savefig(args.output)
if __name__ == '__main__':
main()
| mit |
orlox/hall_evolution | as_full/plotter.py | 1 | 4580 | #!/usr/bin/env python
"""
Program to produce graphs from the output of the as_toroidal program
"""
import matplotlib.pyplot as plt
from pylab import *
import sys
import os
import math
#get folder name, if not specified as argument, use the latest one
folder=""
if len(sys.argv)<2:
timestamp="0"
for root, dirs, files in os.walk("."):
for name in dirs:
if name[:8]=="results_" and len(name)==18:
newtimestamp=name[8:]
if newtimestamp>timestamp:
folder=name
timestamp=newtimestamp
else:
# First argument to program is folder with results
folder=sys.argv[1]
#add "/" if it is not given in folder name
if folder[len(folder)-1]!="/":
folder=folder+"/"
#Show what folder is being used
print("Plotting data in "+folder)
# Open parameters file
params=open(folder+'params.dat','r')
data=params.readline().split(":");
rNum=int(data[1])
data=params.readline().split(":");
thNum=int(data[1])
data=params.readline().split(":");
factor=float(data[1])
data=params.readline().split(":");
tNum=int(data[1])
data=params.readline().split(":");
plotSteps=int(data[1])
data=params.readline().split(":");
rmin=float(data[1])
data=params.readline().split(":");
thtd=float(data[1])
params.close()
#solve values of dr and dth
dr=(1.0-rmin)/(rNum-1)
dth=math.pi/(thNum-1)
#Create array to store A and B at each step
A=zeros((2*rNum-1,2*thNum-1));
B=zeros((rNum,thNum));
# make this smaller to increase the resolution
dx=0.005
# these variables are used by matplotlib
x = arange(0, 1, dx)
y = arange(-1, 1, dx)
X,Y = meshgrid(x, y)
ZA=zeros((len(x),len(y)))
ZB=zeros((len(x),len(y)))
#get place in grid that corresponds to each place in Z grid
grid=zeros((len(x),len(y),2))
i,j=0,0
for xvalue in x :
for yvalue in y:
r=math.sqrt(xvalue**2+yvalue**2)
th=math.acos(yvalue/r)
rStep=int((r-rmin+dr/2)/dr)
thStep=int((th+dth/2)/dth)
if rStep<=0 or rStep>=rNum or thStep==0 or thStep>=thNum:
grid[i][j][0]=0
grid[i][j][1]=0
j+=1
continue
grid[i][j][0]=rStep
grid[i][j][1]=thStep
j+=1
j=0
i+=1
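# Note added for exposition: the nested loops above map every Cartesian plot
# pixel (x, y) to its nearest simulation grid cell by converting to polar
# coordinates, r = sqrt(x^2 + y^2) and theta = acos(y/r), then rounding to the
# closest radial and angular step; pixels outside the shell rmin <= r <= 1 (or
# at the poles) are assigned grid index (0, 0).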
k=0
while 1:
#do not plot this timestep if plot exists
#add zeros to the number of the plot, so they are ordered appropately
num_file=str(k)
diff_zeros=len(str(tNum))-len(str(k))
while diff_zeros>0:
num_file="0"+num_file
diff_zeros-=1
try:
data=open(folder+"plot_"+num_file+".png")
data.close()
k+=plotSteps
continue
except:
pass
#read A file
try:
data=open(folder+"A_"+str(k),'r')
except IOError as e:
break
#first line has simulation time
t=data.readline()
i,j=0,0
for line in data:
values=line.split(" ")
for value in values:
if j==thNum:
break
A[i][j]=float(value)
j+=1
j=0
i+=1
data.close()
#read B file
try:
data=open(folder+"B_"+str(k),'r')
except IOError as e:
break
#first line has simulation time
t=data.readline()
i,j=0,0
for line in data:
values=line.split(" ")
for value in values:
if j==thNum:
break
B[i][j]=float(value)
j+=1
j=0
i+=1
data.close()
i,j=0,0
for xvalue in x :
for yvalue in y:
ZA[i][j]=A[grid[i][j][0]][grid[i][j][1]]
ZB[i][j]=B[grid[i][j][0]][grid[i][j][1]]
j+=1
j=0
i+=1
fig=plt.figure()
#add data
figtext(0.02, 0.95, "Radial steps: "+str(rNum)+" Angular steps: "+str(thNum)+" Factor: "+str(factor)+ " t_h/t_d: "+str(thtd)+" t: "+t)
#figtext(0.25, 0.95, "Angular steps: "+str(thNum))
#figtext(0.45, 0.95, "Factor: "+str(factor))
#figtext(0.6, 0.95, "t_h/t_d: "+str(thtd))
#figtext(0.8, 0.95, "t: "+t)
#create plot
a=fig.add_subplot(1,2,1)
plt.imshow(ZA.T,extent=[0,1,-1,1],origin="lower")
plt.colorbar()
a.set_title("alpha")
a=fig.add_subplot(1,2,2)
plt.imshow(ZB.T,extent=[0,1,-1,1],origin="lower")
plt.colorbar()
a.set_title("beta")
#save to file
savefig(folder+"plot_"+num_file+".png", dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1)
close()
    print(num_file)
k+=plotSteps
sys.exit()
| gpl-2.0 |
rew4332/tensorflow | tensorflow/examples/skflow/mnist_rnn.py | 14 | 2812 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example builds rnn network for mnist data.
Borrowed structure from here: https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3%20-%20Neural%20Networks/recurrent_network.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import metrics, preprocessing
import tensorflow as tf
from tensorflow.contrib import learn
# Parameters
learning_rate = 0.1
training_steps = 3000
batch_size = 128
# Network Parameters
n_input = 28 # MNIST data input (img shape: 28*28)
n_steps = 28 # timesteps
n_hidden = 128 # hidden layer num of features
n_classes = 10 # MNIST total classes (0-9 digits)
### Download and load MNIST data.
mnist = learn.datasets.load_dataset('mnist')
X_train = mnist.train.images
y_train = mnist.train.labels
X_test = mnist.test.images
y_test = mnist.test.labels
# It's useful to scale to ensure Stochastic Gradient Descent will do the right thing
scaler = preprocessing.StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.fit_transform(X_test)
def rnn_model(X, y):
X = tf.reshape(X, [-1, n_steps, n_input]) # (batch_size, n_steps, n_input)
# # permute n_steps and batch_size
X = tf.transpose(X, [1, 0, 2])
# # Reshape to prepare input to hidden activation
X = tf.reshape(X, [-1, n_input]) # (n_steps*batch_size, n_input)
# # Split data because rnn cell needs a list of inputs for the RNN inner loop
X = tf.split(0, n_steps, X) # n_steps * (batch_size, n_input)
# Define a GRU cell with tensorflow
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden)
# Get lstm cell output
_, encoding = tf.nn.rnn(lstm_cell, X, dtype=tf.float32)
return learn.models.logistic_regression(encoding, y)
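# Note added for exposition: rnn_model() treats each 784-pixel MNIST image as a
# sequence of n_steps=28 rows with n_input=28 pixels per row. The transpose,
# reshape and split calls turn the (batch_size, 28, 28) tensor into a list of
# 28 tensors of shape (batch_size, 28), which is the input format expected by
# tf.nn.rnn with the LSTM cell defined above; the final LSTM state is then fed
# to a logistic regression layer.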
classifier = learn.TensorFlowEstimator(model_fn=rnn_model, n_classes=n_classes,
batch_size=batch_size,
steps=training_steps,
learning_rate=learning_rate)
classifier.fit(X_train, y_train, logdir="/tmp/mnist_rnn")
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
| apache-2.0 |
Frankkkkk/arctic | tests/unit/store/test_version_item.py | 2 | 1162 | import pandas as pd
from arctic.store.versioned_item import VersionedItem
def test_versioned_item_str():
item = VersionedItem(symbol="sym",
library="ONEMINUTE",
data=pd.DataFrame(),
version=1.0,
metadata={'metadata': 'foo'})
expected = "VersionedItem(symbol=sym,library=ONEMINUTE," + \
"data=<class 'pandas.core.frame.DataFrame'>,version=1.0,metadata={'metadata': 'foo'}"
assert str(item) == expected
assert repr(item) == expected
def test_versioned_item_str_handles_none():
item = VersionedItem(symbol=None,
library=None,
data=None,
version=None,
metadata=None)
assert str(item)
def test_versioned_item_metadata_dict():
item = VersionedItem(symbol="test",
library="test_lib",
data=None,
version=1.2,
metadata=None)
assert(item.metadata_dict() == {'symbol': 'test', 'library': 'test_lib', 'version': 1.2})
| lgpl-2.1 |
yidawang/brainiak | examples/factoranalysis/htfa_cv_example.py | 7 | 11484 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import math
import requests
import scipy.io
import numpy as np
from mpi4py import MPI
from subprocess import call
from scipy.stats import stats
from sklearn import model_selection
from sklearn.metrics import mean_squared_error
def recon_err(data, F, W):
"""Calcuate reconstruction error
Parameters
----------
data : 2D array
True data to recover.
F : 2D array
HTFA factor matrix.
W : 2D array
HTFA weight matrix.
Returns
-------
float
Returns root mean squared reconstruction error.
"""
recon = F.dot(W).ravel()
err = mean_squared_error(
data.ravel(),
recon,
multioutput='uniform_average')
return math.sqrt(err)
def get_train_err(htfa, data, F):
"""Calcuate training error
Parameters
----------
htfa : HTFA
        An instance of HTFA, a factor analysis class in BrainIAK.
data : 2D array
Input data to HTFA.
F : 2D array
HTFA factor matrix.
Returns
-------
float
Returns root mean squared error on training.
"""
W = htfa.get_weights(data, F)
return recon_err(data, F, W)
def get_test_err(htfa, test_weight_data, test_recon_data,
test_weight_R, test_recon_R, centers, widths):
"""Calcuate test error
Parameters
----------
htfa : HTFA
An instance of HTFA, factor anaysis class in BrainIAK.
    test_weight_data : 2D array
Data used for testing weights.
test_recon_data : 2D array
Data used for testing reconstruction error.
    test_weight_R : 2D array
Coordinate matrix used for testing weights.
test_recon_R : 2D array
Coordinate matrix used for testing reconstruction error.
centers : 2D array
Center matrix of HTFA factors.
widths : 1D array
Width matrix of HTFA factors.
Returns
-------
float
Returns root mean squared error on test.
"""
# calculate F on test_weight_R, based on trained centers/widths
unique_R, inds = htfa.get_unique_R(test_weight_R)
F = htfa.get_factors(unique_R,
inds,
centers,
widths)
# calculate weights on test_weight_data
W = htfa.get_weights(test_weight_data, F)
# calculate F on final test_recon_data
unique_R, inds = htfa.get_unique_R(test_recon_R)
F = htfa.get_factors(unique_R,
inds,
centers,
widths)
return recon_err(test_recon_data, F, W)
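# Illustrative sketch (added for exposition, with made-up shapes): recon_err()
# measures how well the low-rank product F.dot(W) reproduces the data, where F
# is the (n_voxel, K) factor matrix and W is the (K, n_tr) weight matrix.
def _example_recon_err():
    rng = np.random.RandomState(0)
    F = rng.rand(100, 5)   # 100 voxels, 5 factors
    W = rng.rand(5, 20)    # 5 factors, 20 TRs
    data = F.dot(W)        # perfectly reconstructable data
    return recon_err(data, F, W)  # 0.0 up to floating point error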
n_subj = 2
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
group_id = int(rank/n_subj)
n_group = math.ceil(size/n_subj)
htfa_comm = comm.Split(group_id, rank)
htfa_rank = htfa_comm.Get_rank()
htfa_size = htfa_comm.Get_size()
if rank == 0:
import logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
data_dir = os.path.join(os.getcwd(), 'data')
if rank == 0 and not os.path.exists(data_dir):
os.makedirs(data_dir)
url = []
url.append(' https://www.dropbox.com/s/r5s9tg4ekxzbrco/s0.mat?dl=0')
url.append(' https://www.dropbox.com/s/39tr01m76vxwaqa/s1.mat?dl=0')
for idx in range(n_subj):
if idx % size == rank:
file_name = os.path.join(data_dir, 's' + str(idx) + '.mat')
#check if file has already been downloaded
if not os.path.exists(file_name):
#check if URL exists
ret = requests.head(url[idx])
if ret.status_code == 200:
#download data
cmd = 'curl --location -o ' + file_name + url[idx]
try:
retcode = call(cmd, shell=True)
if retcode < 0:
print("File download was terminated by signal", -retcode, file=sys.stderr)
else:
print("File download returned", retcode, file=sys.stderr)
except OSError as e:
print("File download failed:", e, file=sys.stderr)
else:
print("File s%d.mat does not exist!\n"%idx)
comm.Barrier()
#get fMRI data and scanner RAS coordinates
data = []
R = []
mapping = {}
n_local_subj = 0
for idx in range(n_subj):
if idx % htfa_size == htfa_rank:
file_name = os.path.join(data_dir, 's' + str(idx) + '.mat')
all_data = scipy.io.loadmat(file_name)
bold = all_data['data']
# z-score the data
bold = stats.zscore(bold, axis=1, ddof=1)
data.append(bold)
R.append(all_data['R'])
mapping[str(n_local_subj)] = idx
n_local_subj += 1
min_K = 3
max_K = 6
n_K = 2
Ks = np.linspace(min_K, max_K, n_K, endpoint=True).astype(int)
n_splits = 3
# recon_err in shape n_splits*n_K
test_recon_errs = np.zeros((n_subj, n_splits, n_K))
tmp_test_recon_errs = np.zeros((n_subj, n_splits, n_K))
train_recon_errs = np.zeros((n_subj, n_splits, n_K))
tmp_train_recon_errs = np.zeros((n_subj, n_splits, n_K))
local_size = math.ceil(n_subj/size)
if n_local_subj > 0:
from brainiak.factoranalysis.htfa import HTFA
n_voxel, n_tr = data[0].shape
n_dim = R[0].shape[1]
test_size = 0.3
rnd_seed_voxel = 30000
rnd_seed_tr = 3000
tr_solver = 'exact'
nlss_method = 'dogbox'
nlss_loss = 'linear'
upper_ratio = 1.8
lower_ratio = 0.1
voxel_ratio = 0.25
tr_ratio = 0.1
max_voxel = 2000
max_tr = 200
max_sample_voxel = min(max_voxel,
int(voxel_ratio * n_voxel))
max_sample_tr = min(max_tr, int(tr_ratio * n_tr))
#split voxel and TR for two-level cross validation
ss_voxel = model_selection.ShuffleSplit(
n_splits=n_splits,
test_size=test_size,
random_state=rnd_seed_voxel)
voxel_indices = np.arange(n_voxel)
ss_voxel.get_n_splits(voxel_indices)
ss_tr = model_selection.ShuffleSplit(
n_splits=n_splits,
test_size=test_size,
random_state=rnd_seed_tr)
tr_indices = np.arange(n_tr)
ss_tr.get_n_splits(tr_indices)
train_voxels = []
test_voxels = []
train_trs = []
test_trs = []
for train_index, test_index in ss_voxel.split(voxel_indices):
train_voxels.append(train_index)
test_voxels.append(test_index)
for train_index, test_index in ss_tr.split(tr_indices):
train_trs.append(train_index)
test_trs.append(test_index)
for p in range(n_splits):
for idx in range(n_K):
index = p*n_K + idx
if index % n_group == group_id:
#split data and R
train_voxel_indices = train_voxels[p]
test_voxel_indices = test_voxels[p]
train_tr_indices = train_trs[p]
test_tr_indices = test_trs[p]
train_data = []
total_test_data = []
test_weight_data = []
test_recon_data = []
test_weight_R = []
test_recon_R = []
for s in range(n_local_subj):
train_data.append(data[s][:, train_tr_indices])
total_test_data.append(data[s][:, test_tr_indices])
test_weight_data.append(
total_test_data[s][train_voxel_indices, :])
test_recon_data.append(
total_test_data[s][test_voxel_indices, :])
test_weight_R.append(R[s][train_voxel_indices])
test_recon_R.append(R[s][test_voxel_indices])
htfa = HTFA(K=Ks[idx],
max_global_iter=5,
max_local_iter=2,
n_subj=n_subj,
nlss_method=nlss_method,
nlss_loss=nlss_loss,
tr_solver=tr_solver,
upper_ratio=upper_ratio,
lower_ratio=lower_ratio,
max_tr=max_sample_tr,
max_voxel=max_sample_voxel,
comm=htfa_comm,
verbose=True)
htfa.fit(train_data, R)
for s in range(n_local_subj):
#get posterior for each subject
subj_idx = mapping[str(s)]
start_idx = s * htfa.prior_size
end_idx = (s + 1) * htfa.prior_size
local_posteiror = htfa.local_posterior_[start_idx:end_idx]
local_centers = htfa.get_centers(local_posteiror)
local_widths = htfa.get_widths(local_posteiror)
htfa.n_dim = n_dim
htfa.cov_vec_size = np.sum(np.arange(htfa.n_dim) + 1)
htfa.map_offset = htfa.get_map_offset()
#training happens on all voxels, but part of TRs
unique_R_all, inds_all = htfa.get_unique_R(R[s])
train_F = htfa.get_factors(unique_R_all,
inds_all,
local_centers,
local_widths)
#calculate train_recon_err
tmp_train_recon_errs[subj_idx, p,idx] = get_train_err(htfa,
train_data[s],
train_F)
#calculate weights on test_weight_data, test_recon_err on test_recon_data
tmp_test_recon_errs[subj_idx, p,idx] = get_test_err(htfa,
test_weight_data[s],
test_recon_data[s],
test_weight_R[s],
test_recon_R[s],
local_centers,
local_widths)
comm.Reduce(tmp_test_recon_errs, test_recon_errs, op=MPI.SUM)
comm.Reduce(tmp_train_recon_errs, train_recon_errs, op=MPI.SUM)
if rank == 0:
errs = train_recon_errs.reshape(n_subj * n_splits, n_K)
mean_errs = np.average(errs, axis=0)
print("train error on each K is\n")
print(mean_errs)
errs = test_recon_errs.reshape(n_subj * n_splits, n_K)
mean_errs = np.average(errs, axis=0)
print("test error on each K is\n")
print(mean_errs)
best_idx = np.argmin(mean_errs)
print("best K for test recon is %d " % (Ks[best_idx]))
| apache-2.0 |
martinwicke/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/io_test.py | 20 | 5003 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
# pylint: enable=wildcard-import
class IOTest(tf.test.TestCase):
# pylint: disable=undefined-variable
"""tf.learn IO operation tests."""
def test_pandas_dataframe(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.DataFrame(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels[0], list(classifier.predict(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
else:
print("No pandas installed. pandas-related tests are skipped.")
def test_pandas_series(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.Series(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels, list(classifier.predict(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
def test_string_data_formats(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
with self.assertRaises(ValueError):
learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
with self.assertRaises(ValueError):
learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))
def test_dask_io(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# test dask.dataframe
df = pd.DataFrame(
dict(a=list("aabbcc"), b=list(range(6))),
index=pd.date_range(start="20100101", periods=6))
ddf = dd.from_pandas(df, npartitions=3)
extracted_ddf = extract_dask_data(ddf)
self.assertEqual(
extracted_ddf.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_ddf.divisions))
self.assertEqual(
extracted_ddf.columns.tolist(), ["a", "b"],
"Failed with columns = {0}".format(extracted_ddf.columns))
# test dask.series
labels = ddf["a"]
extracted_labels = extract_dask_labels(labels)
self.assertEqual(
extracted_labels.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_labels.divisions))
# labels should only have one column
with self.assertRaises(ValueError):
extract_dask_labels(ddf)
else:
print("No dask installed. dask-related tests are skipped.")
def test_dask_iris_classification(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
data = dd.from_pandas(data, npartitions=2)
labels = pd.DataFrame(iris.target)
labels = dd.from_pandas(labels, npartitions=2)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
predictions = data.map_partitions(classifier.predict).compute()
score = accuracy_score(labels.compute(), predictions)
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
AnoshZahir/Greengraph | Greengraph/Test/test_map.py | 1 | 2863 | from ..graph import Greengraph
from ..map import Map
import requests
import numpy as np
import matplotlib
import os
import yaml
from nose.tools import assert_equal
from mock import patch
@patch.object(requests, 'get')
@patch.object(matplotlib.image, 'imread')
def test_build_default_params(mock_imread, mock_get):
with open(os.path.join(os.path.dirname(__file__),'data','map_data.yaml')) as dataset:
map_data = yaml.load(dataset)['test_map']
for data in map_data:
test = data.pop('test')
url = data.pop('url')
latitude = data.pop('latitude')
longitude = data.pop('longitude')
params = data.pop('params')
if (test == 'default'):
actual_map = Map(latitude,longitude)
elif (test == 'satellite_false'):
actual_map = Map(latitude,longitude,satellite = False)
elif (test == 'zoom'):
actual_map = Map(latitude,longitude,zoom = 30)
elif (test == 'size'):
actual_map = Map(latitude,longitude,size = (300,300))
elif (test == 'sensor_true'):
actual_map = Map(latitude,longitude,sensor = True)
mock_get.assert_called_with(url,params=params)
@patch.object(requests, 'get')
@patch.object(matplotlib.image, 'imread')
#@patch.object(Map, 'green')
def test_green(mock_imread, mock_get):
'''
Test that the function can take a 3d matrix, and return a matrix of true/false values.
Data is taken from the 'test_green' subsection of map_data.yaml.
'''
my_map = Map(51.50, -0.12)
with open(os.path.join(os.path.dirname(__file__), 'data', 'map_data.yaml')) as dataset:
map_data = yaml.load(dataset)['test_green']
for data in map_data:
threshold = data.pop('test')
input_matrix = data.pop('3d_input_matrix')
expected_return = data.pop('2d_output_matrix')
my_map.__setattr__('pixels', input_matrix)
actual_return = my_map.green(threshold)
assert_equal(expected_return, actual_return)
@patch.object(Map, 'green')
def test_count_green(mock_green):
'''
Test that a 2d input matrix of true/false values is correctly summed.
Data is taken from the 'test_count_green' subsection of map_data.yaml.
'''
my_map = Map(51.50, -0.12)
with open(os.path.join(os.path.dirname(__file__), 'data', 'map_data.yaml')) as dataset:
map_data = yaml.load(dataset)['test_count_green']
for data in map_data:
input_values = data.pop('input_values')
expected_return = data.pop('result')
mock_green.return_value = input_values
actual_return = my_map.count_green() #threshold kept unchanged for this test.
assert_equal(actual_return, expected_return) | mit |