repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes)
---|---|---|---|---|---
AlexRobson/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
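# A minimal single-round sketch of the comparison benchmarked below (illustrative
# only; the helper name `_single_round` is ours and nothing in the benchmark calls
# it). It reuses the imports above and fits each estimator once on one synthetic
# problem, returning the test MSE per model.
def _single_round(n_train=1000, n_features=100, n_test=1000, noise=0.1, alpha=0.01):
    X, y = make_regression(n_samples=n_train + n_test, n_features=n_features,
                           noise=noise)
    X_train, X_test = X[:n_train], X[n_train:]
    y_train, y_test = y[:n_train], y[n_train:]
    mses = {}
    for name, clf in [("ElasticNet", ElasticNet(alpha=alpha, l1_ratio=0.5)),
                      ("SGDRegressor", SGDRegressor(alpha=alpha / n_train)),
                      ("Ridge", Ridge(alpha=alpha))]:
        clf.fit(X_train, y_train)
        mses[name] = mean_squared_error(y_test, clf.predict(X_test))
    return mses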
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
endolith/scipy | scipy/cluster/vq.py | 11 | 29250 | """
K-means clustering and vector quantization (:mod:`scipy.cluster.vq`)
====================================================================
Provides routines for k-means clustering, generating code books
from k-means models and quantizing vectors by comparing them with
centroids in a code book.
.. autosummary::
:toctree: generated/
whiten -- Normalize a group of observations so each feature has unit variance
vq -- Calculate code book membership of a set of observation vectors
kmeans -- Perform k-means on a set of observation vectors forming k clusters
kmeans2 -- A different implementation of k-means with more methods
for initializing centroids
Background information
----------------------
The k-means algorithm takes as input the number of clusters to
generate, k, and a set of observation vectors to cluster. It
returns a set of centroids, one for each of the k clusters. An
observation vector is classified with the cluster number or
centroid index of the centroid closest to it.
A vector v belongs to cluster i if it is closer to centroid i than
any other centroid. If v belongs to i, we say centroid i is the
dominating centroid of v. The k-means algorithm tries to
minimize distortion, which is defined as the sum of the squared distances
between each observation vector and its dominating centroid.
The minimization is achieved by iteratively reclassifying
the observations into clusters and recalculating the centroids until
a configuration is reached in which the centroids are stable. One can
also define a maximum number of iterations.
Since vector quantization is a natural application for k-means,
information theory terminology is often used. The centroid index
or cluster index is also referred to as a "code" and the table
mapping codes to centroids and vice versa is often referred to as a
"code book". The result of k-means, a set of centroids, can be
used to quantize vectors. Quantization aims to find an encoding of
vectors that reduces the expected distortion.
All routines expect obs to be an M by N array, where the rows are
the observation vectors. The codebook is a k by N array, where the
ith row is the centroid of code word i. The observation vectors
and centroids have the same feature dimension.
As an example, suppose we wish to compress a 24-bit color image
(each pixel is represented by one byte for red, one for blue, and
one for green) before sending it over the web. By using a smaller
8-bit encoding, we can reduce the amount of data by two
thirds. Ideally, the colors for each of the 256 possible 8-bit
encoding values should be chosen to minimize distortion of the
color. Running k-means with k=256 generates a code book of 256
codes, which fills up all possible 8-bit sequences. Instead of
sending a 3-byte value for each pixel, the 8-bit centroid index
(or code word) of the dominating centroid is transmitted. The code
book is also sent over the wire so each 8-bit code can be
translated back to a 24-bit pixel value representation. If the
image of interest was of an ocean, we would expect many 24-bit
blues to be represented by 8-bit codes. If it was an image of a
human face, more flesh-tone colors would be represented in the
code book.
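As a minimal end-to-end sketch (arbitrary random data, separate from the image
example above):

>>> import numpy as np
>>> from scipy.cluster.vq import whiten, kmeans, vq
>>> rng = np.random.default_rng(0)
>>> obs = rng.normal(size=(100, 3))
>>> w = whiten(obs)                       # rescale each feature to unit variance
>>> code_book, distortion = kmeans(w, 2)  # build a 2-code code book
>>> codes, dists = vq(w, code_book)       # quantize each observation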
"""
import warnings
import numpy as np
from collections import deque
from scipy._lib._util import _asarray_validated, check_random_state,\
rng_integers
from scipy.spatial.distance import cdist
from . import _vq
__docformat__ = 'restructuredtext'
__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2']
class ClusterError(Exception):
pass
def whiten(obs, check_finite=True):
"""
Normalize a group of observations on a per feature basis.
Before running k-means, it is beneficial to rescale each feature
dimension of the observation set by its standard deviation (i.e. "whiten"
it - as in "white noise" where each frequency has equal power).
Each feature is divided by its standard deviation across all observations
to give it unit variance.
Parameters
----------
obs : ndarray
Each row of the array is an observation. The
columns are the features seen during each observation.
>>> # f0 f1 f2
>>> obs = [[ 1., 1., 1.], #o0
... [ 2., 2., 2.], #o1
... [ 3., 3., 3.], #o2
... [ 4., 4., 4.]] #o3
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
result : ndarray
Contains the values in `obs` scaled by the standard deviation
of each column.
Examples
--------
>>> from scipy.cluster.vq import whiten
>>> features = np.array([[1.9, 2.3, 1.7],
... [1.5, 2.5, 2.2],
... [0.8, 0.6, 1.7,]])
>>> whiten(features)
array([[ 4.17944278, 2.69811351, 7.21248917],
[ 3.29956009, 2.93273208, 9.33380951],
[ 1.75976538, 0.7038557 , 7.21248917]])
"""
obs = _asarray_validated(obs, check_finite=check_finite)
std_dev = obs.std(axis=0)
zero_std_mask = std_dev == 0
if zero_std_mask.any():
std_dev[zero_std_mask] = 1.0
warnings.warn("Some columns have standard deviation zero. "
"The values of these columns will not change.",
RuntimeWarning)
return obs / std_dev
def vq(obs, code_book, check_finite=True):
"""
Assign codes from a code book to observations.
Assigns a code from a code book to each observation. Each
observation vector in the 'M' by 'N' `obs` array is compared with the
centroids in the code book and assigned the code of the closest
centroid.
The features in `obs` should have unit variance, which can be
achieved by passing them through the whiten function. The code
book can be created with the k-means algorithm or a different
encoding algorithm.
Parameters
----------
obs : ndarray
Each row of the 'M' x 'N' array is an observation. The columns are
the "features" seen during each observation. The features must be
whitened first using the whiten function or something equivalent.
code_book : ndarray
The code book is usually generated using the k-means algorithm.
Each row of the array holds a different code, and the columns are
the features of the code.
>>> # f0 f1 f2 f3
>>> code_book = [
... [ 1., 2., 3., 4.], #c0
... [ 1., 2., 3., 4.], #c1
... [ 1., 2., 3., 4.]] #c2
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
code : ndarray
A length M array holding the code book index for each observation.
dist : ndarray
The distortion (distance) between the observation and its nearest
code.
Examples
--------
>>> from numpy import array
>>> from scipy.cluster.vq import vq
>>> code_book = array([[1.,1.,1.],
... [2.,2.,2.]])
>>> features = array([[ 1.9,2.3,1.7],
... [ 1.5,2.5,2.2],
... [ 0.8,0.6,1.7]])
>>> vq(features,code_book)
(array([1, 1, 0],'i'), array([ 0.43588989, 0.73484692, 0.83066239]))
"""
obs = _asarray_validated(obs, check_finite=check_finite)
code_book = _asarray_validated(code_book, check_finite=check_finite)
ct = np.common_type(obs, code_book)
c_obs = obs.astype(ct, copy=False)
c_code_book = code_book.astype(ct, copy=False)
if np.issubdtype(ct, np.float64) or np.issubdtype(ct, np.float32):
return _vq.vq(c_obs, c_code_book)
return py_vq(obs, code_book, check_finite=False)
def py_vq(obs, code_book, check_finite=True):
""" Python version of vq algorithm.
The algorithm computes the Euclidean distance between each
observation and every frame in the code_book.
Parameters
----------
obs : ndarray
Expects a rank 2 array. Each row is one observation.
code_book : ndarray
Code book to use. Same format as obs. Should have the same number of
features (i.e., columns) as obs.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
code : ndarray
code[i] gives the label of the ith observation; its code is
code_book[code[i]].
min_dist : ndarray
min_dist[i] gives the distance between the ith observation and its
corresponding code.
Notes
-----
This function is slower than the C version but works for
all input types. If the inputs have the wrong types for the
C versions of the function, this one is called as a last resort.
It is about 20 times slower than the C version.
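Examples
--------
A small illustrative sketch (values chosen arbitrarily):

>>> import numpy as np
>>> from scipy.cluster.vq import py_vq
>>> obs = np.array([[0., 0.], [1., 1.]])
>>> book = np.array([[0., 0.], [3., 3.]])
>>> code, dist = py_vq(obs, book)
>>> code
array([0, 0])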
"""
obs = _asarray_validated(obs, check_finite=check_finite)
code_book = _asarray_validated(code_book, check_finite=check_finite)
if obs.ndim != code_book.ndim:
raise ValueError("Observation and code_book should have the same rank")
if obs.ndim == 1:
obs = obs[:, np.newaxis]
code_book = code_book[:, np.newaxis]
dist = cdist(obs, code_book)
code = dist.argmin(axis=1)
min_dist = dist[np.arange(len(code)), code]
return code, min_dist
# py_vq2 was equivalent to py_vq
py_vq2 = np.deprecate(py_vq, old_name='py_vq2', new_name='py_vq')
def _kmeans(obs, guess, thresh=1e-5):
""" "raw" version of k-means.
Returns
-------
code_book
The lowest distortion codebook found.
avg_dist
The average distance an observation is from a code in the book.
Lower means the code_book matches the data better.
See Also
--------
kmeans : wrapper around k-means
Examples
--------
Note: not whitened in this example.
>>> from numpy import array
>>> from scipy.cluster.vq import _kmeans
>>> features = array([[ 1.9,2.3],
... [ 1.5,2.5],
... [ 0.8,0.6],
... [ 0.4,1.8],
... [ 1.0,1.0]])
>>> book = array((features[0],features[2]))
>>> _kmeans(features,book)
(array([[ 1.7 , 2.4 ],
[ 0.73333333, 1.13333333]]), 0.40563916697728591)
"""
code_book = np.asarray(guess)
diff = np.inf
prev_avg_dists = deque([diff], maxlen=2)
while diff > thresh:
# compute membership and distances between obs and code_book
obs_code, distort = vq(obs, code_book, check_finite=False)
prev_avg_dists.append(distort.mean(axis=-1))
# recalc code_book as centroids of associated obs
code_book, has_members = _vq.update_cluster_means(obs, obs_code,
code_book.shape[0])
code_book = code_book[has_members]
diff = prev_avg_dists[0] - prev_avg_dists[1]
return code_book, prev_avg_dists[1]
def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True,
*, seed=None):
"""
Performs k-means on a set of observation vectors forming k clusters.
The k-means algorithm adjusts the classification of the observations
into clusters and updates the cluster centroids until the position of
the centroids is stable over successive iterations. In this
implementation of the algorithm, the stability of the centroids is
determined by comparing the absolute value of the change in the average
Euclidean distance between the observations and their corresponding
centroids against a threshold. This yields
a code book mapping centroids to codes and vice versa.
Parameters
----------
obs : ndarray
Each row of the M by N array is an observation vector. The
columns are the features seen during each observation.
The features must be whitened first with the `whiten` function.
k_or_guess : int or ndarray
The number of centroids to generate. A code is assigned to
each centroid, which is also the row index of the centroid
in the code_book matrix generated.
The initial k centroids are chosen by randomly selecting
observations from the observation matrix. Alternatively,
passing a k by N array specifies the initial k centroids.
iter : int, optional
The number of times to run k-means, returning the codebook
with the lowest distortion. This argument is ignored if
initial centroids are specified with an array for the
``k_or_guess`` parameter. This parameter does not represent the
number of iterations of the k-means algorithm.
thresh : float, optional
Terminates the k-means algorithm if the change in
distortion since the last k-means iteration is less than
or equal to threshold.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
Seed for initializing the pseudo-random number generator.
If `seed` is None (or `numpy.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
The default is None.
Returns
-------
codebook : ndarray
A k by N array of k centroids. The ith centroid
codebook[i] is represented with the code i. The centroids
and codes generated represent the lowest distortion seen,
not necessarily the globally minimal distortion.
Note that the number of centroids is not necessarily the same as the
``k_or_guess`` parameter, because centroids assigned to no observations
are removed during iterations.
distortion : float
The mean (non-squared) Euclidean distance between the observations
passed and the centroids generated. Note the difference to the standard
definition of distortion in the context of the k-means algorithm, which
is the sum of the squared distances.
See Also
--------
kmeans2 : a different implementation of k-means clustering
with more methods for generating initial centroids but without
using a distortion change threshold as a stopping criterion.
whiten : must be called prior to passing an observation matrix
to kmeans.
Notes
-----
For more functionalities or optimal performance, you can use
`sklearn.cluster.KMeans <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html>`_.
`This <https://hdbscan.readthedocs.io/en/latest/performance_and_scalability.html#comparison-of-high-performance-implementations>`_
is a benchmark result of several implementations.
Examples
--------
>>> from numpy import array
>>> from scipy.cluster.vq import vq, kmeans, whiten
>>> import matplotlib.pyplot as plt
>>> features = array([[ 1.9,2.3],
... [ 1.5,2.5],
... [ 0.8,0.6],
... [ 0.4,1.8],
... [ 0.1,0.1],
... [ 0.2,1.8],
... [ 2.0,0.5],
... [ 0.3,1.5],
... [ 1.0,1.0]])
>>> whitened = whiten(features)
>>> book = np.array((whitened[0],whitened[2]))
>>> kmeans(whitened,book)
(array([[ 2.3110306 , 2.86287398], # random
[ 0.93218041, 1.24398691]]), 0.85684700941625547)
>>> codes = 3
>>> kmeans(whitened,codes)
(array([[ 2.3110306 , 2.86287398], # random
[ 1.32544402, 0.65607529],
[ 0.40782893, 2.02786907]]), 0.5196582527686241)
>>> # Create 50 datapoints in two clusters a and b
>>> pts = 50
>>> rng = np.random.default_rng()
>>> a = rng.multivariate_normal([0, 0], [[4, 1], [1, 4]], size=pts)
>>> b = rng.multivariate_normal([30, 10],
... [[10, 2], [2, 1]],
... size=pts)
>>> features = np.concatenate((a, b))
>>> # Whiten data
>>> whitened = whiten(features)
>>> # Find 2 clusters in the data
>>> codebook, distortion = kmeans(whitened, 2)
>>> # Plot whitened data and cluster centers in red
>>> plt.scatter(whitened[:, 0], whitened[:, 1])
>>> plt.scatter(codebook[:, 0], codebook[:, 1], c='r')
>>> plt.show()
"""
obs = _asarray_validated(obs, check_finite=check_finite)
if iter < 1:
raise ValueError("iter must be at least 1, got %s" % iter)
# Determine whether a count (scalar) or an initial guess (array) was passed.
if not np.isscalar(k_or_guess):
guess = _asarray_validated(k_or_guess, check_finite=check_finite)
if guess.size < 1:
raise ValueError("Asked for 0 clusters. Initial book was %s" %
guess)
return _kmeans(obs, guess, thresh=thresh)
# k_or_guess is a scalar, now verify that it's an integer
k = int(k_or_guess)
if k != k_or_guess:
raise ValueError("If k_or_guess is a scalar, it must be an integer.")
if k < 1:
raise ValueError("Asked for %d clusters." % k)
rng = check_random_state(seed)
# initialize best distance value to a large value
best_dist = np.inf
for i in range(iter):
# the initial code book is randomly selected from observations
guess = _kpoints(obs, k, rng)
book, dist = _kmeans(obs, guess, thresh=thresh)
if dist < best_dist:
best_book = book
best_dist = dist
return best_book, best_dist
def _kpoints(data, k, rng):
"""Pick k points at random in data (one row = one observation).
Parameters
----------
data : ndarray
Expects a rank 1 or 2 array. A rank 1 array is assumed to describe
one-dimensional data; for a rank 2 array, one
row is one observation.
k : int
Number of samples to generate.
rng : `numpy.random.Generator` or `numpy.random.RandomState`
Random number generator.
Returns
-------
x : ndarray
A 'k' by 'N' array containing the initial centroids
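Examples
--------
An illustrative call on random data (`_kpoints` is a private helper):

>>> import numpy as np
>>> from scipy.cluster.vq import _kpoints
>>> rng = np.random.default_rng(0)
>>> data = rng.normal(size=(10, 3))
>>> _kpoints(data, 2, rng).shape
(2, 3)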
"""
idx = rng.choice(data.shape[0], size=k, replace=False)
return data[idx]
def _krandinit(data, k, rng):
"""Returns k samples of a random variable whose parameters depend on data.
More precisely, it returns k observations sampled from a Gaussian random
variable whose mean and covariances are the ones estimated from the data.
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
rng : `numpy.random.Generator` or `numpy.random.RandomState`
Random number generator.
Returns
-------
x : ndarray
A 'k' by 'N' array containing the initial centroids
"""
mu = data.mean(axis=0)
if data.ndim == 1:
cov = np.cov(data)
x = rng.standard_normal(size=k)
x *= np.sqrt(cov)
elif data.shape[1] > data.shape[0]:
# initialize when the covariance matrix is rank deficient
_, s, vh = np.linalg.svd(data - mu, full_matrices=False)
x = rng.standard_normal(size=(k, s.size))
sVh = s[:, None] * vh / np.sqrt(data.shape[0] - 1)
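# each row of x.dot(sVh) then has covariance (data - mu).T @ (data - mu) / (n - 1),
# i.e. the sample covariance of the data, without explicitly forming that
# rank-deficient matrix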
x = x.dot(sVh)
else:
cov = np.atleast_2d(np.cov(data, rowvar=False))
# k rows, d cols (one row = one obs)
# Generate k sample of a random variable ~ Gaussian(mu, cov)
x = rng.standard_normal(size=(k, mu.size))
x = x.dot(np.linalg.cholesky(cov).T)
x += mu
return x
def _kpp(data, k, rng):
""" Picks k points in the data based on the kmeans++ method.
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
rng : `numpy.random.Generator` or `numpy.random.RandomState`
Random number generator.
Returns
-------
init : ndarray
A 'k' by 'N' array containing the initial centroids.
References
----------
.. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
on Discrete Algorithms, 2007.
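Examples
--------
An illustrative call on random data (`_kpp` is a private helper; the public
entry point is ``kmeans2(..., minit='++')``):

>>> import numpy as np
>>> from scipy.cluster.vq import _kpp
>>> rng = np.random.default_rng(0)
>>> data = rng.normal(size=(50, 2))
>>> _kpp(data, 3, rng).shape
(3, 2)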
"""
dims = data.shape[1] if len(data.shape) > 1 else 1
init = np.ndarray((k, dims))
for i in range(k):
if i == 0:
init[i, :] = data[rng_integers(rng, data.shape[0])]
else:
D2 = cdist(init[:i,:], data, metric='sqeuclidean').min(axis=0)
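# D2[j] is the squared distance from data[j] to its nearest already-chosen
# centroid; the next centroid is drawn with probability proportional to D2,
# which is the kmeans++ seeding rule from [1]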
probs = D2/D2.sum()
cumprobs = probs.cumsum()
r = rng.uniform()
init[i, :] = data[np.searchsorted(cumprobs, r)]
return init
_valid_init_meth = {'random': _krandinit, 'points': _kpoints, '++': _kpp}
def _missing_warn():
"""Print a warning when called."""
warnings.warn("One of the clusters is empty. "
"Re-run kmeans with a different initialization.")
def _missing_raise():
"""Raise a ClusterError when called."""
raise ClusterError("One of the clusters is empty. "
"Re-run kmeans with a different initialization.")
_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
missing='warn', check_finite=True, *, seed=None):
"""
Classify a set of observations into k clusters using the k-means algorithm.
The algorithm attempts to minimize the Euclidean distance between
observations and centroids. Several initialization methods are
included.
Parameters
----------
data : ndarray
An 'M' by 'N' array of 'M' observations in 'N' dimensions, or a length
'M' array of 'M' 1-D observations.
k : int or ndarray
The number of clusters to form as well as the number of
centroids to generate. If `minit` initialization string is
'matrix', or if an ndarray is given instead, it is
interpreted as the initial centroids to use.
iter : int, optional
Number of iterations of the k-means algorithm to run. Note
that this differs in meaning from the iters parameter to
the kmeans function.
thresh : float, optional
(not used yet)
minit : str, optional
Method for initialization. Available methods are 'random',
'points', '++' and 'matrix':
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
'points': choose k observations (rows) at random from data for
the initial centroids.
'++': choose k observations according to the kmeans++ method
(careful seeding)
'matrix': interpret the k parameter as a k by M (or length k
array for 1-D data) array of initial centroids.
missing : str, optional
Method to deal with empty clusters. Available methods are
'warn' and 'raise':
'warn': give a warning and continue.
'raise': raise a ClusterError and terminate the algorithm.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
Seed for initializing the pseudo-random number generator.
If `seed` is None (or `numpy.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
The default is None.
Returns
-------
centroid : ndarray
A 'k' by 'N' array of centroids found at the last iteration of
k-means.
label : ndarray
label[i] is the code or index of the centroid the
ith observation is closest to.
See Also
--------
kmeans
References
----------
.. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
on Discrete Algorithms, 2007.
Examples
--------
>>> from scipy.cluster.vq import kmeans2
>>> import matplotlib.pyplot as plt
Create z, an array with shape (100, 2) containing a mixture of samples
from three multivariate normal distributions.
>>> rng = np.random.default_rng()
>>> a = rng.multivariate_normal([0, 6], [[2, 1], [1, 1.5]], size=45)
>>> b = rng.multivariate_normal([2, 0], [[1, -1], [-1, 3]], size=30)
>>> c = rng.multivariate_normal([6, 4], [[5, 0], [0, 1.2]], size=25)
>>> z = np.concatenate((a, b, c))
>>> rng.shuffle(z)
Compute three clusters.
>>> centroid, label = kmeans2(z, 3, minit='points')
>>> centroid
array([[ 2.22274463, -0.61666946], # may vary
[ 0.54069047, 5.86541444],
[ 6.73846769, 4.01991898]])
How many points are in each cluster?
>>> counts = np.bincount(label)
>>> counts
array([29, 51, 20]) # may vary
Plot the clusters.
>>> w0 = z[label == 0]
>>> w1 = z[label == 1]
>>> w2 = z[label == 2]
>>> plt.plot(w0[:, 0], w0[:, 1], 'o', alpha=0.5, label='cluster 0')
>>> plt.plot(w1[:, 0], w1[:, 1], 'd', alpha=0.5, label='cluster 1')
>>> plt.plot(w2[:, 0], w2[:, 1], 's', alpha=0.5, label='cluster 2')
>>> plt.plot(centroid[:, 0], centroid[:, 1], 'k*', label='centroids')
>>> plt.axis('equal')
>>> plt.legend(shadow=True)
>>> plt.show()
"""
if int(iter) < 1:
raise ValueError("Invalid iter (%s), "
"must be a positive integer." % iter)
try:
miss_meth = _valid_miss_meth[missing]
except KeyError as e:
raise ValueError("Unknown missing method %r" % (missing,)) from e
data = _asarray_validated(data, check_finite=check_finite)
if data.ndim == 1:
d = 1
elif data.ndim == 2:
d = data.shape[1]
else:
raise ValueError("Input of rank > 2 is not supported.")
if data.size < 1:
raise ValueError("Empty input is not supported.")
# If k is not a single value, it should be compatible with data's shape
if minit == 'matrix' or not np.isscalar(k):
code_book = np.array(k, copy=True)
if data.ndim != code_book.ndim:
raise ValueError("k array doesn't match data rank")
nc = len(code_book)
if data.ndim > 1 and code_book.shape[1] != d:
raise ValueError("k array doesn't match data dimension")
else:
nc = int(k)
if nc < 1:
raise ValueError("Cannot ask kmeans2 for %d clusters"
" (k was %s)" % (nc, k))
elif nc != k:
warnings.warn("k was not an integer, was converted.")
try:
init_meth = _valid_init_meth[minit]
except KeyError as e:
raise ValueError("Unknown init method %r" % (minit,)) from e
else:
rng = check_random_state(seed)
code_book = init_meth(data, k, rng)
for i in range(iter):
# Compute the nearest neighbor for each obs using the current code book
label = vq(data, code_book)[0]
# Update the code book by computing centroids
new_code_book, has_members = _vq.update_cluster_means(data, label, nc)
if not has_members.all():
miss_meth()
# Set the empty clusters to their previous positions
new_code_book[~has_members] = code_book[~has_members]
code_book = new_code_book
return code_book, label
| bsd-3-clause |
backmari/moose | python/peacock/tests/postprocessor_tab/test_PostprocessorSelectPlugin.py | 1 | 3905 | #!/usr/bin/env python
import sys
import os
import unittest
import shutil
import time
from PyQt5 import QtCore, QtWidgets
from peacock.PostprocessorViewer.plugins.PostprocessorSelectPlugin import main
from peacock.utils import Testing
import mooseutils
class TestPostprocessorSelectPlugin(Testing.PeacockImageTestCase):
"""
Test class for the ArtistToggleWidget which toggles postprocessor lines.
"""
#: QApplication: The main App for QT, this must be static to work correctly.
qapp = QtWidgets.QApplication(sys.argv)
def setUp(self):
"""
Creates the GUI containing the ArtistGroupWidget and the matplotlib figure axes.
"""
# Filenames to load
self._filename = '{}_test.csv'.format(self.__class__.__name__)
self._filename2 = '{}_test2.csv'.format(self.__class__.__name__)
# Read the data
filenames = [self._filename, self._filename2]
self._control, self._widget, self._window = main(filenames, mooseutils.PostprocessorReader)
def copyfiles(self, partial=False):
"""
Copy files into the temporary location.
"""
if partial:
shutil.copyfile('../input/white_elephant_jan_2016_partial.csv', self._filename)
else:
shutil.copyfile('../input/white_elephant_jan_2016.csv', self._filename)
shutil.copyfile('../input/postprocessor.csv', self._filename2)
for data in self._widget._data[0]:
data.load()
def tearDown(self):
"""
Remove temporary files.
"""
if os.path.exists(self._filename):
os.remove(self._filename)
if os.path.exists(self._filename2):
os.remove(self._filename2)
def testEmpty(self):
"""
Test that an empty plot is possible.
"""
self.assertImage('testEmpty.png')
def testSelect(self):
"""
Test that plotting from multiple files works.
"""
self.copyfiles()
vars = ['air_temp_set_1', 'sincos']
for i in range(len(vars)):
self._control._groups[i]._toggles[vars[i]].CheckBox.setCheckState(QtCore.Qt.Checked)
self._control._groups[i]._toggles[vars[i]].CheckBox.clicked.emit(True)
self.assertImage('testSelect.png')
def testUpdateData(self):
"""
Test that a postprocessor data updates when file is changed.
"""
self.copyfiles(partial=True)
var = 'air_temp_set_1'
self._control._groups[0]._toggles[var].CheckBox.setCheckState(QtCore.Qt.Checked)
self._control._groups[0]._toggles[var].CheckBox.clicked.emit(True)
self.assertImage('testUpdateData0.png')
# Reload the data (this would be done via a Timer)
time.sleep(1) # need to wait a bit for the modified time to change
self.copyfiles()
self.assertImage('testUpdateData1.png')
def testRepr(self):
"""
Test python scripting.
"""
self.copyfiles()
vars = ['air_temp_set_1', 'sincos']
for i in range(len(vars)):
self._control._groups[i]._toggles[vars[i]].CheckBox.setCheckState(QtCore.Qt.Checked)
self._control._groups[i]._toggles[vars[i]].CheckBox.clicked.emit(True)
output, imports = self._control.repr()
self.assertIn("data = mooseutils.PostprocessorReader('TestPostprocessorSelectPlugin_test.csv')", output)
self.assertIn("x = data('time')", output)
self.assertIn("y = data('air_temp_set_1')", output)
self.assertIn("axes0.plot(x, y, marker='', linewidth=1, color=[0.2, 0.627, 0.173, 1.0], markersize=1, linestyle='-', label='air_temp_set_1')", output)
self.assertIn("data = mooseutils.PostprocessorReader('TestPostprocessorSelectPlugin_test2.csv')", output)
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2)
| lgpl-2.1 |
ashhher3/scikit-learn | doc/sphinxext/gen_rst.py | 16 | 39657 | """
Example generation for scikit-learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename) as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
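# A minimal usage sketch (the helper name `_tee_demo` is ours; nothing in the
# build calls it): writes go both to stdout and to an in-memory buffer, which
# is how Tee is used further below to capture the output of example scripts.
def _tee_demo(message='hello from Tee\n'):
    buffer = StringIO()
    tee = Tee(sys.stdout, buffer)
    tee.write(message)   # appears on stdout and is stored in `buffer`
    tee.flush()
    return buffer.getvalue()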
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://")')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
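Examples
--------
A sketch of the expected input (all names here are illustrative)::

    cobj = {'name': 'Ridge',
            'module': 'sklearn.linear_model',
            'module_short': 'sklearn.linear_model'}
    link = resolver.resolve(cobj, this_url='auto_examples/plot_foo.html')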
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
# replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
lines = open(filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
lines = open(example_file).readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
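# Keep the aspect ratio: use the width-based scale unless it would overflow the
# requested height, in which case fall back to the height-based scale
# (equivalent to taking min(scale_w, scale_h)).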
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
# don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
# Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`,
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all, as some contained whitespace due to their default gallery
# thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
# have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
example_code_obj = identify_names(open(example_file).read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
if app.builder.name == 'latex':
# Don't embed hyperlinks when a latex builder is used.
return
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
yonglehou/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
h2oai/h2o-3 | h2o-py/h2o/estimators/pca.py | 2 | 23767 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_python.py
# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
#
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.estimators.estimator_base import H2OEstimator
from h2o.exceptions import H2OValueError
from h2o.frame import H2OFrame
from h2o.utils.typechecks import assert_is_type, Enum, numeric
class H2OPrincipalComponentAnalysisEstimator(H2OEstimator):
"""
Principal Components Analysis
"""
algo = "pca"
supervised_learning = False
def __init__(self,
model_id=None, # type: Optional[Union[None, str, H2OEstimator]]
training_frame=None, # type: Optional[Union[None, str, H2OFrame]]
validation_frame=None, # type: Optional[Union[None, str, H2OFrame]]
ignored_columns=None, # type: Optional[List[str]]
ignore_const_cols=True, # type: bool
score_each_iteration=False, # type: bool
transform="none", # type: Literal["none", "standardize", "normalize", "demean", "descale"]
pca_method="gram_s_v_d", # type: Literal["gram_s_v_d", "power", "randomized", "glrm"]
pca_impl=None, # type: Optional[Literal["mtj_evd_densematrix", "mtj_evd_symmmatrix", "mtj_svd_densematrix", "jama"]]
k=1, # type: int
max_iterations=1000, # type: int
use_all_factor_levels=False, # type: bool
compute_metrics=True, # type: bool
impute_missing=False, # type: bool
seed=-1, # type: int
max_runtime_secs=0.0, # type: float
export_checkpoints_dir=None, # type: Optional[str]
):
"""
:param model_id: Destination id for this model; auto-generated if not specified.
Defaults to ``None``.
:type model_id: Union[None, str, H2OEstimator], optional
:param training_frame: Id of the training data frame.
Defaults to ``None``.
:type training_frame: Union[None, str, H2OFrame], optional
:param validation_frame: Id of the validation data frame.
Defaults to ``None``.
:type validation_frame: Union[None, str, H2OFrame], optional
:param ignored_columns: Names of columns to ignore for training.
Defaults to ``None``.
:type ignored_columns: List[str], optional
:param ignore_const_cols: Ignore constant columns.
Defaults to ``True``.
:type ignore_const_cols: bool
:param score_each_iteration: Whether to score during each iteration of model training.
Defaults to ``False``.
:type score_each_iteration: bool
:param transform: Transformation of training data
Defaults to ``"none"``.
:type transform: Literal["none", "standardize", "normalize", "demean", "descale"]
:param pca_method: Specify the algorithm to use for computing the principal components: GramSVD - uses a
distributed computation of the Gram matrix, followed by a local SVD; Power - computes the SVD using the
power iteration method (experimental); Randomized - uses randomized subspace iteration method; GLRM -
fits a generalized low-rank model with L2 loss function and no regularization and solves for the SVD
using local matrix algebra (experimental)
Defaults to ``"gram_s_v_d"``.
:type pca_method: Literal["gram_s_v_d", "power", "randomized", "glrm"]
:param pca_impl: Specify the implementation to use for computing PCA (via SVD or EVD): MTJ_EVD_DENSEMATRIX -
eigenvalue decompositions for dense matrix using MTJ; MTJ_EVD_SYMMMATRIX - eigenvalue decompositions for
symmetric matrix using MTJ; MTJ_SVD_DENSEMATRIX - singular-value decompositions for dense matrix using
MTJ; JAMA - eigenvalue decompositions for dense matrix using JAMA. References: JAMA -
http://math.nist.gov/javanumerics/jama/; MTJ - https://github.com/fommil/matrix-toolkits-java/
Defaults to ``None``.
:type pca_impl: Literal["mtj_evd_densematrix", "mtj_evd_symmmatrix", "mtj_svd_densematrix", "jama"], optional
:param k: Rank of matrix approximation
Defaults to ``1``.
:type k: int
:param max_iterations: Maximum training iterations
Defaults to ``1000``.
:type max_iterations: int
:param use_all_factor_levels: Whether first factor level is included in each categorical expansion
Defaults to ``False``.
:type use_all_factor_levels: bool
:param compute_metrics: Whether to compute metrics on the training data
Defaults to ``True``.
:type compute_metrics: bool
:param impute_missing: Whether to impute missing entries with the column mean
Defaults to ``False``.
:type impute_missing: bool
:param seed: RNG seed for initialization
Defaults to ``-1``.
:type seed: int
:param max_runtime_secs: Maximum allowed runtime in seconds for model training. Use 0 to disable.
Defaults to ``0.0``.
:type max_runtime_secs: float
:param export_checkpoints_dir: Automatically export generated models to this directory.
Defaults to ``None``.
:type export_checkpoints_dir: str, optional
"""
super(H2OPrincipalComponentAnalysisEstimator, self).__init__()
self._parms = {}
self._id = self._parms['model_id'] = model_id
self.training_frame = training_frame
self.validation_frame = validation_frame
self.ignored_columns = ignored_columns
self.ignore_const_cols = ignore_const_cols
self.score_each_iteration = score_each_iteration
self.transform = transform
self.pca_method = pca_method
self.pca_impl = pca_impl
self.k = k
self.max_iterations = max_iterations
self.use_all_factor_levels = use_all_factor_levels
self.compute_metrics = compute_metrics
self.impute_missing = impute_missing
self.seed = seed
self.max_runtime_secs = max_runtime_secs
self.export_checkpoints_dir = export_checkpoints_dir
@property
def training_frame(self):
"""
Id of the training data frame.
Type: ``Union[None, str, H2OFrame]``.
:examples:
>>> data = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/SDSS_quasar.txt.zip")
>>> data_pca = H2OPrincipalComponentAnalysisEstimator()
>>> data_pca.train(x=data.names, training_frame=data)
>>> data_pca.show()
"""
return self._parms.get("training_frame")
@training_frame.setter
def training_frame(self, training_frame):
self._parms["training_frame"] = H2OFrame._validate(training_frame, 'training_frame')
@property
def validation_frame(self):
"""
Id of the validation data frame.
Type: ``Union[None, str, H2OFrame]``.
:examples:
>>> data = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/SDSS_quasar.txt.zip")
>>> train, valid = data.split_frame(ratios=[.8], seed=1234)
>>> model_pca = H2OPrincipalComponentAnalysisEstimator(impute_missing=True)
>>> model_pca.train(x=data.names,
... training_frame=train,
... validation_frame=valid)
>>> model_pca.show()
"""
return self._parms.get("validation_frame")
@validation_frame.setter
def validation_frame(self, validation_frame):
self._parms["validation_frame"] = H2OFrame._validate(validation_frame, 'validation_frame')
@property
def ignored_columns(self):
"""
Names of columns to ignore for training.
Type: ``List[str]``.
"""
return self._parms.get("ignored_columns")
@ignored_columns.setter
def ignored_columns(self, ignored_columns):
assert_is_type(ignored_columns, None, [str])
self._parms["ignored_columns"] = ignored_columns
@property
def ignore_const_cols(self):
"""
Ignore constant columns.
Type: ``bool``, defaults to ``True``.
:examples:
>>> prostate = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip")
>>> prostate['CAPSULE'] = prostate['CAPSULE'].asfactor()
>>> prostate['RACE'] = prostate['RACE'].asfactor()
>>> prostate['DCAPS'] = prostate['DCAPS'].asfactor()
>>> prostate['DPROS'] = prostate['DPROS'].asfactor()
>>> pros_pca = H2OPrincipalComponentAnalysisEstimator(ignore_const_cols=False)
>>> pros_pca.train(x=prostate.names, training_frame=prostate)
>>> pros_pca.show()
"""
return self._parms.get("ignore_const_cols")
@ignore_const_cols.setter
def ignore_const_cols(self, ignore_const_cols):
assert_is_type(ignore_const_cols, None, bool)
self._parms["ignore_const_cols"] = ignore_const_cols
@property
def score_each_iteration(self):
"""
Whether to score during each iteration of model training.
Type: ``bool``, defaults to ``False``.
:examples:
>>> data = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/SDSS_quasar.txt.zip")
>>> data_pca = H2OPrincipalComponentAnalysisEstimator(k=3,
... score_each_iteration=True,
... seed=1234,
... impute_missing=True)
>>> data_pca.train(x=data.names, training_frame=data)
>>> data_pca.show()
"""
return self._parms.get("score_each_iteration")
@score_each_iteration.setter
def score_each_iteration(self, score_each_iteration):
assert_is_type(score_each_iteration, None, bool)
self._parms["score_each_iteration"] = score_each_iteration
@property
def transform(self):
"""
Transformation of training data
Type: ``Literal["none", "standardize", "normalize", "demean", "descale"]``, defaults to ``"none"``.
:examples:
>>> data = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/SDSS_quasar.txt.zip")
>>> data_pca = H2OPrincipalComponentAnalysisEstimator(k=-1,
... transform="standardize",
... pca_method="power",
... impute_missing=True,
... max_iterations=800)
>>> data_pca.train(x=data.names, training_frame=data)
>>> data_pca.show()
"""
return self._parms.get("transform")
@transform.setter
def transform(self, transform):
assert_is_type(transform, None, Enum("none", "standardize", "normalize", "demean", "descale"))
self._parms["transform"] = transform
@property
def pca_method(self):
"""
Specify the algorithm to use for computing the principal components: GramSVD - uses a distributed computation of
the Gram matrix, followed by a local SVD; Power - computes the SVD using the power iteration method
(experimental); Randomized - uses randomized subspace iteration method; GLRM - fits a generalized low-rank model
with L2 loss function and no regularization and solves for the SVD using local matrix algebra (experimental)
Type: ``Literal["gram_s_v_d", "power", "randomized", "glrm"]``, defaults to ``"gram_s_v_d"``.
:examples:
>>> data = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/SDSS_quasar.txt.zip")
>>> data_pca = H2OPrincipalComponentAnalysisEstimator(k=-1,
... transform="standardize",
... pca_method="power",
... impute_missing=True,
... max_iterations=800)
>>> data_pca.train(x=data.names, training_frame=data)
>>> data_pca.show()
"""
return self._parms.get("pca_method")
@pca_method.setter
def pca_method(self, pca_method):
assert_is_type(pca_method, None, Enum("gram_s_v_d", "power", "randomized", "glrm"))
self._parms["pca_method"] = pca_method
@property
def pca_impl(self):
"""
Specify the implementation to use for computing PCA (via SVD or EVD): MTJ_EVD_DENSEMATRIX - eigenvalue
decompositions for dense matrix using MTJ; MTJ_EVD_SYMMMATRIX - eigenvalue decompositions for symmetric matrix
using MTJ; MTJ_SVD_DENSEMATRIX - singular-value decompositions for dense matrix using MTJ; JAMA - eigenvalue
decompositions for dense matrix using JAMA. References: JAMA - http://math.nist.gov/javanumerics/jama/; MTJ -
https://github.com/fommil/matrix-toolkits-java/
Type: ``Literal["mtj_evd_densematrix", "mtj_evd_symmmatrix", "mtj_svd_densematrix", "jama"]``.
:examples:
>>> data = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/SDSS_quasar.txt.zip")
>>> data_pca = H2OPrincipalComponentAnalysisEstimator(k=3,
... pca_impl="jama",
... impute_missing=True,
... max_iterations=1200)
>>> data_pca.train(x=data.names, training_frame=data)
>>> data_pca.show()
"""
return self._parms.get("pca_impl")
@pca_impl.setter
def pca_impl(self, pca_impl):
assert_is_type(pca_impl, None, Enum("mtj_evd_densematrix", "mtj_evd_symmmatrix", "mtj_svd_densematrix", "jama"))
self._parms["pca_impl"] = pca_impl
@property
def k(self):
"""
Rank of matrix approximation
Type: ``int``, defaults to ``1``.
:examples:
>>> data = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/SDSS_quasar.txt.zip")
>>> data_pca = H2OPrincipalComponentAnalysisEstimator(k=-1,
... transform="standardize",
... pca_method="power",
... impute_missing=True,
... max_iterations=800)
>>> data_pca.train(x=data.names, training_frame=data)
>>> data_pca.show()
"""
return self._parms.get("k")
@k.setter
def k(self, k):
assert_is_type(k, None, int)
self._parms["k"] = k
@property
def max_iterations(self):
"""
Maximum training iterations
Type: ``int``, defaults to ``1000``.
:examples:
>>> data = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/SDSS_quasar.txt.zip")
>>> data_pca = H2OPrincipalComponentAnalysisEstimator(k=-1,
... transform="standardize",
... pca_method="power",
... impute_missing=True,
... max_iterations=800)
>>> data_pca.train(x=data.names, training_frame=data)
>>> data_pca.show()
"""
return self._parms.get("max_iterations")
@max_iterations.setter
def max_iterations(self, max_iterations):
assert_is_type(max_iterations, None, int)
self._parms["max_iterations"] = max_iterations
@property
def use_all_factor_levels(self):
"""
Whether first factor level is included in each categorical expansion
Type: ``bool``, defaults to ``False``.
:examples:
>>> data = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/SDSS_quasar.txt.zip")
>>> data_pca = H2OPrincipalComponentAnalysisEstimator(k=3,
... use_all_factor_levels=True,
... seed=1234)
>>> data_pca.train(x=data.names, training_frame=data)
>>> data_pca.show()
"""
return self._parms.get("use_all_factor_levels")
@use_all_factor_levels.setter
def use_all_factor_levels(self, use_all_factor_levels):
assert_is_type(use_all_factor_levels, None, bool)
self._parms["use_all_factor_levels"] = use_all_factor_levels
@property
def compute_metrics(self):
"""
Whether to compute metrics on the training data
Type: ``bool``, defaults to ``True``.
:examples:
>>> prostate = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip")
>>> prostate['CAPSULE'] = prostate['CAPSULE'].asfactor()
>>> prostate['RACE'] = prostate['RACE'].asfactor()
>>> prostate['DCAPS'] = prostate['DCAPS'].asfactor()
>>> prostate['DPROS'] = prostate['DPROS'].asfactor()
>>> pros_pca = H2OPrincipalComponentAnalysisEstimator(compute_metrics=False)
>>> pros_pca.train(x=prostate.names, training_frame=prostate)
>>> pros_pca.show()
"""
return self._parms.get("compute_metrics")
@compute_metrics.setter
def compute_metrics(self, compute_metrics):
assert_is_type(compute_metrics, None, bool)
self._parms["compute_metrics"] = compute_metrics
@property
def impute_missing(self):
"""
Whether to impute missing entries with the column mean
Type: ``bool``, defaults to ``False``.
:examples:
>>> prostate = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip")
>>> prostate['CAPSULE'] = prostate['CAPSULE'].asfactor()
>>> prostate['RACE'] = prostate['RACE'].asfactor()
>>> prostate['DCAPS'] = prostate['DCAPS'].asfactor()
>>> prostate['DPROS'] = prostate['DPROS'].asfactor()
>>> pros_pca = H2OPrincipalComponentAnalysisEstimator(impute_missing=True)
>>> pros_pca.train(x=prostate.names, training_frame=prostate)
>>> pros_pca.show()
"""
return self._parms.get("impute_missing")
@impute_missing.setter
def impute_missing(self, impute_missing):
assert_is_type(impute_missing, None, bool)
self._parms["impute_missing"] = impute_missing
@property
def seed(self):
"""
RNG seed for initialization
Type: ``int``, defaults to ``-1``.
:examples:
>>> data = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/SDSS_quasar.txt.zip")
>>> data_pca = H2OPrincipalComponentAnalysisEstimator(k=3,
... seed=1234,
... impute_missing=True)
>>> data_pca.train(x=data.names, training_frame=data)
>>> data_pca.show()
"""
return self._parms.get("seed")
@seed.setter
def seed(self, seed):
assert_is_type(seed, None, int)
self._parms["seed"] = seed
@property
def max_runtime_secs(self):
"""
Maximum allowed runtime in seconds for model training. Use 0 to disable.
Type: ``float``, defaults to ``0.0``.
:examples:
>>> data = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/SDSS_quasar.txt.zip")
>>> data_pca = H2OPrincipalComponentAnalysisEstimator(k=-1,
... transform="standardize",
... pca_method="power",
... impute_missing=True,
...                                                         max_iterations=800,
... max_runtime_secs=15)
>>> data_pca.train(x=data.names, training_frame=data)
>>> data_pca.show()
"""
return self._parms.get("max_runtime_secs")
@max_runtime_secs.setter
def max_runtime_secs(self, max_runtime_secs):
assert_is_type(max_runtime_secs, None, numeric)
self._parms["max_runtime_secs"] = max_runtime_secs
@property
def export_checkpoints_dir(self):
"""
Automatically export generated models to this directory.
Type: ``str``.
:examples:
>>> import tempfile
>>> from os import listdir
>>> prostate = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip")
>>> prostate['CAPSULE'] = prostate['CAPSULE'].asfactor()
>>> prostate['RACE'] = prostate['RACE'].asfactor()
>>> prostate['DCAPS'] = prostate['DCAPS'].asfactor()
>>> prostate['DPROS'] = prostate['DPROS'].asfactor()
>>> checkpoints_dir = tempfile.mkdtemp()
>>> pros_pca = H2OPrincipalComponentAnalysisEstimator(impute_missing=True,
... export_checkpoints_dir=checkpoints_dir)
>>> pros_pca.train(x=prostate.names, training_frame=prostate)
>>> len(listdir(checkpoints_dir))
"""
return self._parms.get("export_checkpoints_dir")
@export_checkpoints_dir.setter
def export_checkpoints_dir(self, export_checkpoints_dir):
assert_is_type(export_checkpoints_dir, None, str)
self._parms["export_checkpoints_dir"] = export_checkpoints_dir
def init_for_pipeline(self):
"""
Returns an H2OPCA object which implements the fit and transform methods so it can be used in a sklearn.Pipeline properly.
All parameters defined in self._parms should be input parameters of the H2OPCA.__init__ method.
:returns: H2OPCA object
:examples:
>>> from sklearn.pipeline import Pipeline
>>> from h2o.transforms.preprocessing import H2OScaler
>>> from h2o.estimators import H2ORandomForestEstimator
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
>>> pipe = Pipeline([("standardize", H2OScaler()),
... ("pca", H2OPrincipalComponentAnalysisEstimator(k=2).init_for_pipeline()),
... ("rf", H2ORandomForestEstimator(seed=42,ntrees=5))])
>>> pipe.fit(iris[:4], iris[4])
"""
import inspect
from h2o.transforms.decomposition import H2OPCA
# check which parameters can be passed to H2OPCA init
var_names = list(dict(inspect.getmembers(H2OPCA.__init__.__code__))['co_varnames'])
parameters = {k: v for k, v in self._parms.items() if k in var_names}
return H2OPCA(**parameters)
| apache-2.0 |
IshankGulati/scikit-learn | examples/neural_networks/plot_mlp_training_curves.py | 58 | 3692 | """
========================================================
Compare Stochastic learning strategies for MLPClassifier
========================================================
This example visualizes some training loss curves for different stochastic
learning strategies, including SGD and Adam. Because of time constraints, we
use several small datasets, for which L-BFGS might be more suitable. The
general trend shown in these examples seems to carry over to larger datasets,
however.
Note that those results can be highly dependent on the value of
``learning_rate_init``.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
# different learning rate schedules and momentum parameters
params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0,
'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0,
'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'solver': 'adam', 'learning_rate_init': 0.01}]
labels = ["constant learning-rate", "constant with momentum",
"constant with Nesterov's momentum",
"inv-scaling learning-rate", "inv-scaling with momentum",
"inv-scaling with Nesterov's momentum", "adam"]
plot_args = [{'c': 'red', 'linestyle': '-'},
{'c': 'green', 'linestyle': '-'},
{'c': 'blue', 'linestyle': '-'},
{'c': 'red', 'linestyle': '--'},
{'c': 'green', 'linestyle': '--'},
{'c': 'blue', 'linestyle': '--'},
{'c': 'black', 'linestyle': '-'}]
def plot_on_dataset(X, y, ax, name):
# for each dataset, plot learning for each learning strategy
print("\nlearning on dataset %s" % name)
ax.set_title(name)
X = MinMaxScaler().fit_transform(X)
mlps = []
if name == "digits":
# digits is larger but converges fairly quickly
max_iter = 15
else:
max_iter = 400
for label, param in zip(labels, params):
print("training: %s" % label)
mlp = MLPClassifier(verbose=0, random_state=0,
max_iter=max_iter, **param)
mlp.fit(X, y)
mlps.append(mlp)
print("Training set score: %f" % mlp.score(X, y))
print("Training set loss: %f" % mlp.loss_)
for mlp, label, args in zip(mlps, labels, plot_args):
ax.plot(mlp.loss_curve_, label=label, **args)
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
# load / generate some toy datasets
iris = datasets.load_iris()
digits = datasets.load_digits()
data_sets = [(iris.data, iris.target),
(digits.data, digits.target),
datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
datasets.make_moons(noise=0.3, random_state=0)]
for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits',
'circles', 'moons']):
plot_on_dataset(*data, ax=ax, name=name)
fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center")
plt.show()
| bsd-3-clause |
mauzeh/formation-flight | runs/multihub/n_s/plot_line.py | 1 | 2725 | import config
from lib.util import tsv_get_column_index
from run import get_matrix_dimensions
import os
import math
from lib.util import make_sure_path_exists
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
config.sink_dir = '%s/sink' % os.path.dirname(__file__)
config.axis_x = {
'name' : r'$H$',
'column' : 'config_count_hubs'
}
config.axis_y = {
'name' : r'$s$',
'column' : 'config_etah_slack'
}
config.output_nx, config.output_ny = get_matrix_dimensions()
config.interesting_z_axes = [{
'name' : 'Average Formation Size',
'column' : 'avg_formation_size'
}]
def run():
data_file = '%s/latest.tsv' % config.sink_dir
data = np.loadtxt(
open(data_file, 'rb'),
delimiter = "\t",
skiprows = 1
)
axis_x = config.axis_x
axis_y = config.axis_y
for axis_z in config.interesting_z_axes:
plt.figure()
x = data[:, tsv_get_column_index(data_file, axis_x['column'])]
y = data[:, tsv_get_column_index(data_file, axis_y['column'])]
z = data[:, tsv_get_column_index(data_file, axis_z['column'])]
# Note that we must convert the lock time into the lock distance L
if axis_x['column'] == 'config_lock_time':
x = 300 * x / 60
if axis_y['column'] == 'config_lock_time':
y = 300 * y / 60
try:
nx = config.output_nx
ny = config.output_ny
except AttributeError:
N = len(z)
nx = math.sqrt(N)
ny = nx
#
#print 'variable: %s, nx = %d, ny = %d, count z = %d. z = %s' % (
# axis_z['column'],
# nx, ny, len(z), z
#)
x = x.reshape(nx, ny)
y = y.reshape(nx, ny)
z = z.reshape(nx, ny)
plt.xlabel(axis_x['name'])
plt.ylabel(axis_y['name'])
print x,y,z
print x[:,0]
print y[0,:]
print z[0,:]
plt.plot(y[0,:],z[0,:])
plt.show()
return
plt.grid(True)
try:
cs = plt.contour(x, y, z, axis_z['levels'])
except KeyError:
cs = plt.contour(x, y, z, 10)
plt.clabel(cs)
plt.colorbar()
#plt.title(r'%s ($n=%d$)' % (axis_z['name'], config.count_hubs))
plt.title(r'%s' % axis_z['name'])
fig_path = '%s/plot_%s.pdf' % (config.sink_dir, axis_z['column'])
fig_path = fig_path.replace('/runs/', '/plots/')
fig_path = fig_path.replace('/sink/', '/')
make_sure_path_exists(os.path.dirname(fig_path))
#plt.show()
#plt.savefig(fig_path)
| mit |
YinongLong/scikit-learn | examples/manifold/plot_manifold_sphere.py | 31 | 5118 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`sphx_glr_auto_examples_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space. Unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
closely that of representing a flat map of the Earth, as with a
`map projection <https://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (n_samples, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
Obus/scikit-learn | sklearn/learning_curve.py | 110 | 13467 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
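Examples
--------
A minimal usage sketch on a small synthetic classification task (the
estimator and training sizes here are illustrative only):
>>> import numpy as np
>>> from sklearn.datasets import make_classification
>>> from sklearn.tree import DecisionTreeClassifier
>>> from sklearn.learning_curve import learning_curve
>>> X, y = make_classification(n_samples=100, random_state=0)
>>> sizes, train_scores, test_scores = learning_curve(
...     DecisionTreeClassifier(random_state=0), X, y,
...     train_sizes=np.linspace(0.1, 1.0, 3), cv=5)
>>> train_scores.shape     # (n_ticks, n_cv_folds)
(3, 5)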
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
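Examples
--------
A minimal usage sketch (the estimator and parameter grid are illustrative only):
>>> import numpy as np
>>> from sklearn.datasets import load_iris
>>> from sklearn.svm import SVC
>>> from sklearn.learning_curve import validation_curve
>>> iris = load_iris()
>>> train_scores, test_scores = validation_curve(
...     SVC(), iris.data, iris.target, param_name="gamma",
...     param_range=np.logspace(-6, -1, 3), cv=5)
>>> train_scores.shape     # (len(param_range), n_cv_folds)
(3, 5)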
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
| bsd-3-clause |
aisthesis/pynance | pynance/data/prep.py | 2 | 7374 | """
.. Copyright (c) 2014- Marshall Farrier
license http://opensource.org/licenses/MIT
Data - preprocessing functions (:mod:`pynance.data.prep`)
=========================================================
.. currentmodule:: pynance.data.prep
"""
import numpy as np
import pandas as pd
def center(dataset, out=None):
"""
Returns a centered data set.
Each column of the returned data will have mean 0.
The row vector subtracted from each row to achieve this
transformation is also returned.
Parameters
----------
dataset : DataFrame or ndarray
out : DataFrame or ndarray, optional
Alternate output array in which to place the result.
If provided, it must have the same shape and type
(DataFrame or ndarray) as the expected output.
Returns
----------
out : tuple of DataFrame or ndarray
The output data is of the same type as the input.
Notes
----------
To exclude a column (such as a constant feature, which is
usually the first or last column of data) simply don't
include it in the input. For example:
>>> centered_data, means = pn.center(mydata.iloc[:, 1:])
To perform this operation in place:
>>> _, means = pn.center(mydata.iloc[:, 1:], out=mydata.iloc[:, 1:])
"""
return _preprocess(_center_fn, dataset, out)
def _preprocess(func, dataset, out):
# Generic preprocessing function used in center() and normalize()
is_df = isinstance(dataset, pd.DataFrame)
_data = (dataset.values if is_df else dataset)
processed_data, adjustment = func(_data)
if not is_df:
if out is not None:
out[:, :] = processed_data
return out, adjustment
return processed_data, adjustment
adj_df = pd.DataFrame(data=adjustment, index=['Mean'], columns=dataset.columns,
dtype='float64')
if out is not None:
out.values[:, :] = processed_data
return out, adj_df
processed_df = pd.DataFrame(data=processed_data, index=dataset.index,
columns=dataset.columns, dtype='float64')
return processed_df, adj_df
def _center_fn(_data):
adjustment = np.mean(_data, axis=0, dtype=np.float64).reshape((1, _data.shape[1]))
centered_data = _data - adjustment
return centered_data, adjustment
def _normalize_fn(_data):
adjustment = np.std(_data, axis=0, dtype=np.float64).reshape((1, _data.shape[1]))
normalized_data = _data / adjustment
return normalized_data, adjustment
def normalize(centered_data, out=None):
"""
Returns a data set with standard deviation of 1.
The input data must be centered for the operation to
yield valid results: The mean of each column must be 0.
Each column of the returned data set will have standard
deviation 1.
The row vector by which each row of data is divided is
also returned.
Parameters
----------
centered_data : DataFrame or ndarray
out : DataFrame or ndarray, optional
Alternate output array in which to place the result.
If provided, it must have the same shape and type
(DataFrame or ndarray) as the expected output.
Returns
----------
out : tuple of DataFrame or ndarray
The output data is of the same type as the input.
Notes
----------
To exclude a column (such as a constant feature, which is
usually the first or last column of data) simply don't
include it in the input. For example:
>>> normalized_data, sd_adj = pn.normalize(mydata.iloc[:, 1:])
To perform this operation in place:
>>> _, sd_adj = pn.normalize(mydata.iloc[:, 1:], out=mydata.iloc[:, 1:])
"""
return _preprocess(_normalize_fn, centered_data, out)
def transform(data_frame, **kwargs):
"""
Return a transformed DataFrame.
Transform data_frame along the given axis. By default, each row will be normalized (axis=0).
Parameters
-----------
data_frame : DataFrame
Data to be normalized.
axis : int, optional
0 (default) to normalize each row, 1 to normalize each column.
method : str, optional
Valid methods are:
- "vector" : Default for normalization by row (axis=0).
Normalize along axis as a vector with norm `norm`
- "last" : Linear normalization setting last value along the axis to `norm`
- "first" : Default for normalization of columns (axis=1).
Linear normalization setting first value along the given axis to `norm`
- "mean" : Normalize so that the mean of each vector along the given axis is `norm`
norm : float, optional
Target value of normalization, defaults to 1.0.
labels : DataFrame, optional
Labels may be passed as keyword argument, in which
case the label values will also be normalized and returned.
Returns
-----------
df : DataFrame
Normalized data.
labels : DataFrame, optional
Normalized labels, if provided as input.
Notes
-----------
If labels are real-valued, they should also be normalized.
..
Having row_norms as a numpy array should be benchmarked against
using a DataFrame:
http://stackoverflow.com/questions/12525722/normalize-data-in-pandas
Note: This isn't a bottleneck. Using a feature set with 13k rows and 256
columns ('ge' from 1962 until now), the normalization was immediate.
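Examples
-----------
A minimal sketch (the tiny price frame below is purely illustrative):
>>> import pandas as pd
>>> prices = pd.DataFrame({'Open': [10., 20., 40.], 'Close': [12., 24., 48.]})
>>> unit_rows = transform(prices)  # each row scaled to a vector of norm 1
>>> rebased = transform(prices, axis=1, norm=100.)  # each column rebased so its first value is 100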
"""
norm = kwargs.get('norm', 1.0)
axis = kwargs.get('axis', 0)
if axis == 0:
norm_vector = _get_norms_of_rows(data_frame, kwargs.get('method', 'vector'))
else:
norm_vector = _get_norms_of_cols(data_frame, kwargs.get('method', 'first'))
if 'labels' in kwargs:
if axis == 0:
return data_frame.apply(lambda col: col * norm / norm_vector, axis=0), \
kwargs['labels'].apply(lambda col: col * norm / norm_vector, axis=0)
else:
raise ValueError("label normalization incompatible with normalization by column")
else:
if axis == 0:
return data_frame.apply(lambda col: col * norm / norm_vector, axis=0)
else:
return data_frame.apply(lambda row: row * norm / norm_vector, axis=1)
def _get_norms_of_rows(data_frame, method):
""" return a column vector containing the norm of each row """
if method == 'vector':
norm_vector = np.linalg.norm(data_frame.values, axis=1)
elif method == 'last':
norm_vector = data_frame.iloc[:, -1].values
elif method == 'mean':
norm_vector = np.mean(data_frame.values, axis=1)
elif method == 'first':
norm_vector = data_frame.iloc[:, 0].values
else:
raise ValueError("no normalization method '{0}'".format(method))
return norm_vector
def _get_norms_of_cols(data_frame, method):
""" return a row vector containing the norm of each column """
if method == 'first':
norm_vector = data_frame.iloc[0, :].values
elif method == 'mean':
norm_vector = np.mean(data_frame.values, axis=0)
elif method == 'last':
norm_vector = data_frame.iloc[-1, :].values
elif method == 'vector':
norm_vector = np.linalg.norm(data_frame.values, axis=0)
else:
raise ValueError("no normalization method '{0}'".format(method))
return norm_vector
| mit |
NunoEdgarGub1/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# mark every point beyond the first 30 as unlabeled (-1)
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/mpl_examples/pylab_examples/custom_cmap.py | 3 | 4967 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
"""
Example: suppose you want red to increase from 0 to 1 over the bottom
half, green to do the same over the middle half, and blue over the top
half. Then you would use:
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0))}
If, as in this example, there are no discontinuities in the r, g, and b
components, then it is quite simple: the second and third element of
each tuple, above, is the same--call it "y". The first element ("x")
defines interpolation intervals over the full range of 0 to 1, and it
must span that whole range. In other words, the values of x divide the
0-to-1 range into a set of segments, and y gives the end-point color
values for each segment.
Now consider the green. cdict['green'] is saying that for
0 <= x <= 0.25, y is zero; no green.
0.25 < x <= 0.75, y varies linearly from 0 to 1.
x > 0.75, y remains at 1, full green.
If there are discontinuities, then it is a little more complicated.
Label the 3 elements in each row in the cdict entry for a given color as
(x, y0, y1). Then for values of x between x[i] and x[i+1] the color
value is interpolated between y1[i] and y0[i+1].
Going back to the cookbook example, look at cdict['red']; because y0 !=
y1, it is saying that for x from 0 to 0.5, red increases from 0 to 1,
but then it jumps down, so that for x from 0.5 to 1, red increases from
0.7 to 1. Green ramps from 0 to 1 as x goes from 0 to 0.5, then jumps
back to 0, and ramps back to 1 as x goes from 0.5 to 1.
  row i:   x  y0  y1
                  /
                 /
  row i+1: x  y0  y1
Above is an attempt to show that for x in the range x[i] to x[i+1], the
interpolation is between y1[i] and y0[i+1]. So, y0[0] and y1[-1] are
never used.
"""
cdict1 = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
cdict2 = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 1.0),
(1.0, 0.1, 1.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.1),
(0.5, 1.0, 0.0),
(1.0, 0.0, 0.0))
}
cdict3 = {'red': ((0.0, 0.0, 0.0),
(0.25,0.0, 0.0),
(0.5, 0.8, 1.0),
(0.75,1.0, 1.0),
(1.0, 0.4, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.25,0.0, 0.0),
(0.5, 0.9, 0.9),
(0.75,0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.4),
(0.25,1.0, 1.0),
(0.5, 1.0, 0.8),
(0.75,0.0, 0.0),
(1.0, 0.0, 0.0))
}
# Now we will use this example to illustrate 3 ways of
# handling custom colormaps.
# First, the most direct and explicit:
blue_red1 = LinearSegmentedColormap('BlueRed1', cdict1)
# Second, create the map explicitly and register it.
# Like the first method, this method works with any kind
# of Colormap, not just
# a LinearSegmentedColormap:
blue_red2 = LinearSegmentedColormap('BlueRed2', cdict2)
plt.register_cmap(cmap=blue_red2)
# Third, for LinearSegmentedColormap only,
# leave everything to register_cmap:
plt.register_cmap(name='BlueRed3', data=cdict3) # optional lut kwarg
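# For example, the lookup table size could be set explicitly via the optional
# ``lut`` keyword mentioned above (sketch):
# plt.register_cmap(name='BlueRed3Coarse', data=cdict3, lut=8)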
x = np.arange(0, np.pi, 0.1)
y = np.arange(0, 2*np.pi, 0.1)
X, Y = np.meshgrid(x,y)
Z = np.cos(X) * np.sin(Y)
plt.figure(figsize=(10,4))
plt.subplots_adjust(wspace=0.3)
plt.subplot(1,3,1)
plt.imshow(Z, interpolation='nearest', cmap=blue_red1)
plt.colorbar()
plt.subplot(1,3,2)
cmap = plt.get_cmap('BlueRed2')
plt.imshow(Z, interpolation='nearest', cmap=cmap)
plt.colorbar()
# Now we will set the third cmap as the default. One would
# not normally do this in the middle of a script like this;
# it is done here just to illustrate the method.
plt.rcParams['image.cmap'] = 'BlueRed3'
# Also see below for an alternative, particularly for
# interactive use.
plt.subplot(1,3,3)
plt.imshow(Z, interpolation='nearest')
plt.colorbar()
# Or as yet another variation, we could replace the rcParams
# specification *before* the imshow with the following *after*
# imshow:
#
# plt.set_cmap('BlueRed3')
#
# This sets the new default *and* sets the colormap of the last
# image-like item plotted via pyplot, if any.
plt.suptitle('Custom Blue-Red colormaps')
plt.show()
| gpl-2.0 |
hamedhsn/incubator-airflow | tests/contrib/hooks/test_bigquery_hook.py | 16 | 8098 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from airflow.contrib.hooks import bigquery_hook as hook
from oauth2client.contrib.gce import HttpAccessTokenRefreshError
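# Probe once for working BigQuery credentials so that the integration tests
# below can be skipped cleanly when the service is not reachable.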
bq_available = True
try:
hook.BigQueryHook().get_service()
except HttpAccessTokenRefreshError:
bq_available = False
class TestBigQueryDataframeResults(unittest.TestCase):
def setUp(self):
self.instance = hook.BigQueryHook()
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_output_is_dataframe_with_valid_query(self):
import pandas as pd
df = self.instance.get_pandas_df('select 1')
self.assertIsInstance(df, pd.DataFrame)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_invalid_query(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df('from `1`')
self.assertIn('pandas_gbq.gbq.GenericGBQException: Reason: invalidQuery',
str(context.exception), "")
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_suceeds_with_explicit_legacy_query(self):
df = self.instance.get_pandas_df('select 1', dialect='legacy')
self.assertEqual(df.iloc(0)[0][0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_suceeds_with_explicit_std_query(self):
df = self.instance.get_pandas_df('select * except(b) from (select 1 a, 2 b)', dialect='standard')
self.assertEqual(df.iloc(0)[0][0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_incompatible_syntax(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df('select * except(b) from (select 1 a, 2 b)', dialect='legacy')
self.assertIn('pandas_gbq.gbq.GenericGBQException: Reason: invalidQuery',
str(context.exception), "")
class TestBigQueryTableSplitter(unittest.TestCase):
def test_internal_need_default_project(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('dataset.table', None)
self.assertIn('INTERNAL: No default project is specified',
str(context.exception), "")
def test_split_dataset_table(self):
project, dataset, table = hook._split_tablename('dataset.table',
'project')
self.assertEqual("project", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative:dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_sql_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative.dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_colon_in_project(self):
project, dataset, table = hook._split_tablename('alt1:alt.dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_valid_double_column(self):
project, dataset, table = hook._split_tablename('alt1:alt:dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_invalid_syntax_triple_colon(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt3:dataset.table',
'project')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_triple_dot(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_column_double_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt.dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_colon_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt:dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_dot_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project', 'var_x')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
class TestBigQueryHookSourceFormat(unittest.TestCase):
def test_invalid_source_format(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load("test.test", "test_schema.json", ["test_data.json"], source_format="json")
# since we passed 'json' in, and it's not valid, make sure it's present in the error string.
self.assertIn("JSON", str(context.exception))
class TestBigQueryBaseCursor(unittest.TestCase):
def test_invalid_schema_update_options(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=["THIS IS NOT VALID"]
)
self.assertIn("THIS IS NOT VALID", str(context.exception))
def test_invalid_schema_update_and_write_disposition(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=['ALLOW_FIELD_ADDITION'],
write_disposition='WRITE_EMPTY'
)
self.assertIn("schema_update_options is only", str(context.exception))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
joosthoeks/jhTAlib | example/example-6-plot-quandl.py | 1 | 3211 | #!/usr/bin/env python
import quandl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import jhtalib as jhta
def main():
# quandl_data = quandl.get('BCHARTS/BITSTAMPUSD', order='asc', collapse='daily', returns='numpy', authtoken='YOUR_AUTH_TOKEN')
quandl_data = quandl.get('BCHARTS/BITSTAMPUSD', order='asc', collapse='daily', returns='numpy')
df = {'datetime': [], 'Open': [], 'High': [], 'Low': [], 'Close': [], 'Volume': []}
i = 0
while i < len(quandl_data['Close']):
# df['datetime'].append(i)
df['datetime'].append(quandl_data['Date'][i])
df['Open'].append(float(quandl_data['Open'][i]))
df['High'].append(float(quandl_data['High'][i]))
df['Low'].append(float(quandl_data['Low'][i]))
df['Close'].append(float(quandl_data['Close'][i]))
df['Volume'].append(int(quandl_data['Volume (BTC)'][i]))
i += 1
x = df['datetime']
sma_list = jhta.SMA(df, 200)
mmr_list = jhta.MMR(df)
mmr_mean_list = jhta.MEAN({'mmr': mmr_list}, len(mmr_list), 'mmr')
mom_list = jhta.MOM(df, 365)
mom_mean_list = jhta.MEAN({'mom': mom_list}, len(mom_list), 'mom')
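    # MMR is presumably the Mayer Multiple (Close / SMA 200); the constant 2.4
    # line plotted below is the classic "overheated" threshold for that ratio.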
print ('Calculated from %i data points:' % len(x))
print ('Last Close: %f' % df['Close'][-1])
print ('Last SMA 200: %f' % sma_list[-1])
print ('Last MMR: %f' % mmr_list[-1])
print ('Last MEAN MMR: %f' % mmr_mean_list[-1])
print ('Last MOM 365: %f' % mom_list[-1])
print ('Last MEAN MOM 365: %f' % mom_mean_list[-1])
# left = 365
# right = len(x)
# print ('Plot starts from %i until %i in Log scale:' % (left, right))
plt.figure(1, (30, 10))
plt.subplot(311)
plt.title('Time / Price / Ratio')
plt.xlabel('Time')
plt.ylabel('Price')
plt.grid(True)
plt.plot(x, df['Close'], color='blue')
plt.plot(x, sma_list, color='red')
plt.legend(['Close', 'SMA 200'], loc='upper left')
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
plt.gca().xaxis.set_major_locator(mdates.YearLocator())
plt.gcf().autofmt_xdate()
# plt.xlim(left=left, right=right)
plt.yscale('log')
plt.subplot(312)
plt.xlabel('Time')
plt.ylabel('Ratio')
plt.grid(True)
plt.plot(x, [1] * len(x), color='red')
plt.plot(x, mmr_list)
plt.plot(x, mmr_mean_list)
plt.plot(x, [2.4] * len(x))
plt.legend(['SMA 200', 'MMR', 'MEAN MMR', 'THRESHOLD 2.4'], loc='upper left')
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
plt.gca().xaxis.set_major_locator(mdates.YearLocator())
plt.gcf().autofmt_xdate()
# plt.xlim(left=left, right=right)
plt.yscale('log')
plt.subplot(313)
plt.xlabel('Time')
plt.ylabel('Ratio')
plt.grid(True)
plt.plot(x, [0] * len(x), color='blue')
plt.plot(x, mom_list)
plt.plot(x, mom_mean_list)
plt.legend(['Price', 'MOM 365', 'MEAN MOM 365'], loc='upper left')
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
plt.gca().xaxis.set_major_locator(mdates.YearLocator())
plt.gcf().autofmt_xdate()
# plt.xlim(left=left, right=right)
plt.yscale('symlog')
plt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
hainm/scikit-learn | sklearn/semi_supervised/label_propagation.py | 128 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
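        # Main propagation loop: diffuse the label distributions through the
        # graph, then clamp labeled points back toward their initial values.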
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| bsd-3-clause |
andaag/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
santis19/fatiando | gallery/gridder/profile.py | 6 | 1310 | """
Extracting a profile from spatial data
========================================
The function :func:`fatiando.gridder.profile` can be used to extract a profile
of data from a map. It interpolates the data onto the profile points so you can
specify the profile in any direction and use irregular point data as input.
"""
import matplotlib.pyplot as plt
import numpy as np
from fatiando import gridder, utils
# Generate random points
x, y = gridder.scatter((-2, 2, -2, 2), n=1000, seed=1)
# And calculate 2D Gaussians on these points as sample data
data = 2*utils.gaussian2d(x, y, -0.6, -1) - utils.gaussian2d(x, y, 1.5, 1.5)
# Extract a profile between points 1 and 2
p1, p2 = [-1.5, -0.5], [1.5, 1.5]
xp, yp, distance, profile = gridder.profile(x, y, data, p1, p2, 100)
# Plot the profile and the original map data
plt.figure()
plt.subplot(2, 1, 1)
plt.title('Extracted profile points')
plt.plot(distance, profile, '.k')
plt.xlim(distance.min(), distance.max())
plt.grid()
plt.subplot(2, 1, 2)
plt.title("Original data")
plt.plot(xp, yp, '-k', label='Profile', linewidth=2)
scale = np.abs([data.min(), data.max()]).max()
plt.tricontourf(x, y, data, 50, cmap='RdBu_r', vmin=-scale, vmax=scale)
plt.colorbar(orientation='horizontal', aspect=50)
plt.legend(loc='lower right')
plt.tight_layout()
plt.show()
| bsd-3-clause |
ssaeger/scikit-learn | examples/gaussian_process/plot_gpc.py | 103 | 3927 | """
====================================================================
Probabilistic predictions with Gaussian process classification (GPC)
====================================================================
This example illustrates the predicted probability of GPC for an RBF kernel
with different choices of the hyperparameters. The first figure shows the
predicted probability of GPC with arbitrarily chosen hyperparameters and with
the hyperparameters corresponding to the maximum log-marginal-likelihood (LML).
While the hyperparameters chosen by optimizing LML have a considerably larger
LML, they perform slightly worse according to the log-loss on test data. The
figure shows that this is because they exhibit a steep change of the class
probabilities at the class boundaries (which is good) but have predicted
probabilities close to 0.5 far away from the class boundaries (which is bad).
This undesirable effect is caused by the Laplace approximation used
internally by GPC.
The second figure shows the log-marginal-likelihood for different choices of
the kernel's hyperparameters, highlighting the two choices of the
hyperparameters used in the first figure by black dots.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics.classification import accuracy_score, log_loss
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# Generate data
train_size = 50
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 100)[:, np.newaxis]
y = np.array(X[:, 0] > 2.5, dtype=int)
# Specify Gaussian Processes with fixed and optimized hyperparameters
gp_fix = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0),
optimizer=None)
gp_fix.fit(X[:train_size], y[:train_size])
gp_opt = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0))
gp_opt.fit(X[:train_size], y[:train_size])
print("Log Marginal Likelihood (initial): %.3f"
% gp_fix.log_marginal_likelihood(gp_fix.kernel_.theta))
print("Log Marginal Likelihood (optimized): %.3f"
% gp_opt.log_marginal_likelihood(gp_opt.kernel_.theta))
print("Accuracy: %.3f (initial) %.3f (optimized)"
% (accuracy_score(y[:train_size], gp_fix.predict(X[:train_size])),
accuracy_score(y[:train_size], gp_opt.predict(X[:train_size]))))
print("Log-loss: %.3f (initial) %.3f (optimized)"
% (log_loss(y[:train_size], gp_fix.predict_proba(X[:train_size])[:, 1]),
log_loss(y[:train_size], gp_opt.predict_proba(X[:train_size])[:, 1])))
# Plot posteriors
plt.figure(0)
plt.scatter(X[:train_size, 0], y[:train_size], c='k', label="Train data")
plt.scatter(X[train_size:, 0], y[train_size:], c='g', label="Test data")
X_ = np.linspace(0, 5, 100)
plt.plot(X_, gp_fix.predict_proba(X_[:, np.newaxis])[:, 1], 'r',
label="Initial kernel: %s" % gp_fix.kernel_)
plt.plot(X_, gp_opt.predict_proba(X_[:, np.newaxis])[:, 1], 'b',
label="Optimized kernel: %s" % gp_opt.kernel_)
plt.xlabel("Feature")
plt.ylabel("Class 1 probability")
plt.xlim(0, 5)
plt.ylim(-0.25, 1.5)
plt.legend(loc="best")
# Plot LML landscape
plt.figure(1)
theta0 = np.logspace(0, 8, 30)
theta1 = np.logspace(-1, 1, 29)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
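# Evaluate the log-marginal-likelihood on a grid of (magnitude, length-scale)
# pairs; the kernel hyperparameters are passed to the model on a log scale.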
LML = [[gp_opt.log_marginal_likelihood(np.log([Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
plt.plot(np.exp(gp_fix.kernel_.theta)[0], np.exp(gp_fix.kernel_.theta)[1],
'ko', zorder=10)
plt.plot(np.exp(gp_opt.kernel_.theta)[0], np.exp(gp_opt.kernel_.theta)[1],
'ko', zorder=10)
plt.pcolor(Theta0, Theta1, LML)
plt.xscale("log")
plt.yscale("log")
plt.colorbar()
plt.xlabel("Magnitude")
plt.ylabel("Length-scale")
plt.title("Log-marginal-likelihood")
plt.show()
| bsd-3-clause |
JesseLivezey/pymc3 | pymc3/examples/stochastic_volatility.py | 13 | 4096 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
from matplotlib.pylab import *
import numpy as np
from pymc3 import *
from pymc3.distributions.timeseries import *
from scipy.sparse import csc_matrix
from scipy import optimize
# <markdowncell>
# Asset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others very stable. Stochastic volatility models model this with a latent volatility variable, modeled as a stochastic process. The following model is similar to the one described in the No-U-Turn Sampler paper, Hoffman (2011) p21.
#
# $$ \sigma \sim Exponential(50) $$
#
# $$ \nu \sim Exponential(.1) $$
#
# $$ s_i \sim Normal(s_{i-1}, \sigma^{-2}) $$
#
# $$ log(\frac{y_i}{y_{i-1}}) \sim t(\nu, 0, exp(-2 s_i)) $$
#
# Here, $y$ is the daily return series and $s$ is the latent log
# volatility process.
# <markdowncell>
# ## Build Model
# <markdowncell>
# First we load some daily returns of the S&P 500.
# <codecell>
n = 400
returns = np.genfromtxt(get_data_file('pymc3.examples', "data/SP500.csv"))[-n:]
returns[:5]
# <markdowncell>
# Specifying the model in pymc3 mirrors its statistical specification.
#
# However, it is easier to sample the scale of the log volatility process innovations, $\sigma$, on a log scale, so we create it using `TransformedVar` and use `logtransform`. `TransformedVar` creates one variable in the transformed space and one in the normal space. The one in the transformed space (here $\text{log}(\sigma) $) is the one over which sampling will occur, and the one in the normal space is the one to use throughout the rest of the model.
#
# It takes a variable name, a distribution and a transformation to use.
# <codecell>
model = Model()
with model:
    sigma = Exponential('sigma', 1. / .02, testval=.1)
nu = Exponential('nu', 1. / 10)
s = GaussianRandomWalk('s', sigma ** -2, shape=n)
r = T('r', nu, lam=exp(-2 * s), observed=returns)
# <markdowncell>
# ## Fit Model
#
# To get a decent scaling matrix for the Hamiltonian sampler, we find the Hessian at a point. The method `Model.d2logpc` gives us a `Theano` compiled function that returns the matrix of 2nd derivatives.
#
# However, the 2nd derivatives for the degrees of freedom parameter, `nu`, are negative and thus not very informative and make the matrix non-positive definite, so we replace that entry with a reasonable guess at the scale. The interactions between `log_sigma`/`nu` and `s` are also not very useful, so we set them to zero.
# <markdowncell>
# For this model, the full maximum a posteriori (MAP) point is degenerate and has infinite density. However, if we fix `log_sigma` and `nu` it is no longer degenerate, so we find the MAP with respect to the volatility process, 's', keeping `log_sigma` and `nu` constant at their default values.
#
# We use L-BFGS because it is more efficient for high dimensional
# functions (`s` has n elements).
# <markdowncell>
# We do a short initial run to get near the right area, then start again
# using a new Hessian at the new starting point to get faster sampling due
# to better scaling. We do a short run since this is an interactive
# example.
# <codecell>
def run(n=2000):
if n == "short":
n = 50
with model:
start = find_MAP(vars=[s], fmin=optimize.fmin_l_bfgs_b)
step = NUTS(model.vars, scaling=start, gamma=.25)
trace = sample(5, step, start)
# Start next run at the last sampled position.
start2 = trace.point(-1)
step2 = NUTS(model.vars, scaling=start2, gamma=.25)
        trace = sample(n, step2, trace=trace)
    return trace
# <codecell>
def plot_traces(trace):
    # figsize(12,6)
    title(str(s))
    plot(trace[s][::10].T, 'b', alpha=.03)
    xlabel('time')
    ylabel('log volatility')
    # figsize(12,6)
    traceplot(trace, model.vars[:-1])
if __name__ == '__main__':
    plot_traces(run())
# <markdowncell>
# ## References
#
# 1. Hoffman & Gelman. (2011). [The No-U-Turn Sampler: Adaptively Setting
# Path Lengths in Hamiltonian Monte
# Carlo](http://arxiv.org/abs/1111.4246).
| apache-2.0 |
RachitKansal/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
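In symbols, the pointwise decomposition estimated in the lower figures is
    error(x) = bias^2(x) + variance(x) + noise(x),
where the expectation defining each term is taken over random training sets LS.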
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves int the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
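    # Average the squared prediction error over every pair of training-set
    # realization (i) and noise realization (j) to estimate the expected error.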
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
sangwook236/SWDT | sw_dev/python/rnd/test/statistics/pysad/pysad_basic.py | 1 | 5580 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# REF [site] >>
# https://github.com/selimfirat/pysad
# https://pysad.readthedocs.io/en/latest/
import numpy as np
from pysad.evaluation import AUROCMetric
from pysad.models import LODA, xStream
from pyod.models.iforest import IForest
from pysad.models.integrations import ReferenceWindowModel
from pysad.utils import ArrayStreamer
from pysad.transform.postprocessing import RunningAveragePostprocessor
from pysad.transform.preprocessing import InstanceUnitNormScaler
from pysad.transform.ensemble import AverageScoreEnsembler
from pysad.transform.probability_calibration import ConformalProbabilityCalibrator
from pysad.statistics import AverageMeter, VarianceMeter
from pysad.utils import ArrayStreamer, Data
from sklearn.utils import shuffle
from tqdm import tqdm
# REF [site] >> https://pysad.readthedocs.io/en/latest/examples.html
def full_usage_example():
np.random.seed(61) # Fix random seed.
# Get data to stream.
data = Data("data")
X_all, y_all = data.get_data("arrhythmia.mat")
X_all, y_all = shuffle(X_all, y_all)
iterator = ArrayStreamer(shuffle=False) # Init streamer to simulate streaming data.
model = xStream() # Init xStream anomaly detection model.
preprocessor = InstanceUnitNormScaler() # Init normalizer.
postprocessor = RunningAveragePostprocessor(window_size=5) # Init running average postprocessor.
auroc = AUROCMetric() # Init area under receiver-operating- characteristics curve metric.
for X, y in tqdm(iterator.iter(X_all[100:], y_all[100:])): # Stream data.
X = preprocessor.fit_transform_partial(X) # Fit preprocessor to and transform the instance.
score = model.fit_score_partial(X) # Fit model to and score the instance.
score = postprocessor.fit_transform_partial(score) # Apply running averaging to the score.
auroc.update(y, score) # Update AUROC metric.
# Output resulting AUROCS metric.
print("AUROC: {}.".format(auroc.get()))
# REF [site] >> https://pysad.readthedocs.io/en/latest/examples.html
def statistics_usage_example():
# Init data with mean 0 and standard deviation 1.
X = np.random.randn(1000)
# Init statistics trackers for mean and variance.
avg_meter = AverageMeter()
var_meter = VarianceMeter()
for i in range(1000):
# Update statistics trackers.
avg_meter.update(X[i])
var_meter.update(X[i])
# Output resulting statistics.
print(f"Average: {avg_meter.get()}, Standard deviation: {np.sqrt(var_meter.get())}")
# It is close to random normal distribution with mean 0 and std 1 as we init the array via np.random.rand.
# REF [site] >> https://pysad.readthedocs.io/en/latest/examples.html
def ensembler_usage_example():
np.random.seed(61) # Fix random seed.
data = Data("data")
X_all, y_all = data.get_data("arrhythmia.mat") # Load Aryhytmia data.
X_all, y_all = shuffle(X_all, y_all) # Shuffle data.
iterator = ArrayStreamer(shuffle=False) # Create streamer to simulate streaming data.
auroc = AUROCMetric() # Tracker of area under receiver-operating- characteristics curve metric.
# Models to be ensembled.
models = [
xStream(),
LODA()
]
ensembler = AverageScoreEnsembler() # Ensembler module.
for X, y in tqdm(iterator.iter(X_all, y_all)): # Iterate over examples.
model_scores = np.empty(len(models), dtype=np.float64)
# Fit & Score via for each model.
for i, model in enumerate(models):
model.fit_partial(X)
model_scores[i] = model.score_partial(X)
score = ensembler.fit_transform_partial(model_scores) # Fit to ensembler model and get ensembled score.
auroc.update(y, score) # Update AUROC metric.
# Output score.
print("AUROC: {}.".format(auroc.get()))
# REF [site] >> https://pysad.readthedocs.io/en/latest/examples.html
def probability_calibrator_usage_example():
np.random.seed(61) # Fix seed.
model = xStream() # Init model.
calibrator = ConformalProbabilityCalibrator(windowed=True, window_size=300) # Init probability calibrator.
streaming_data = Data().get_iterator("arrhythmia.mat") # Get streamer.
for i, (x, y_true) in enumerate(streaming_data): # Stream data.
anomaly_score = model.fit_score_partial(x) # Fit to an instance x and score it.
calibrated_score = calibrator.fit_transform(anomaly_score) # Fit & calibrate score.
# Output if the instance is anomalous.
if calibrated_score > 0.95: # If probability of being normal is less than 5%.
print(f"Alert: {i}th data point is anomalous.")
# REF [site] >> https://pysad.readthedocs.io/en/latest/examples.html
def PyOD_integration_example():
np.random.seed(61) # Fix seed.
# Get data to stream.
data = Data("data")
X_all, y_all = data.get_data("arrhythmia.mat")
X_all, y_all = shuffle(X_all, y_all)
iterator = ArrayStreamer(shuffle=False)
# Fit reference window integration to first 100 instances initially.
model = ReferenceWindowModel(model_cls=IForest, window_size=240, sliding_size=30, initial_window_X=X_all[:100])
auroc = AUROCMetric() # Init area under receiver-operating-characteristics curve metric tracker.
for X, y in tqdm(iterator.iter(X_all[100:], y_all[100:])):
model.fit_partial(X) # Fit to the instance.
score = model.score_partial(X) # Score the instance.
auroc.update(y, score) # Update the metric.
# Output AUROC metric.
print("AUROC: {}.".format(auroc.get()))
def main():
full_usage_example()
#statistics_usage_example()
#ensembler_usage_example()
#probability_calibrator_usage_example()
#PyOD_integration_example()
#--------------------------------------------------------------------
if "__main__" == __name__:
main()
| gpl-3.0 |
dsquareindia/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 82 | 4768 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``); the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross-validation, which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
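# Note on the helper above: ``staged_decision_function`` yields the decision values after
# each boosting stage, so the loop records one deviance value per iteration.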
def cv_estimate(n_splits=3):
cv = KFold(n_splits=n_splits)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv.split(X_train, y_train):
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_splits
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
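# ``oob_improvement_[i]`` holds the improvement in loss on the out-of-bag samples at stage i,
# so the negative cumulative sum above behaves like an OOB loss curve (relative to stage 0)
# that can be compared against the test and CV curves below.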
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
rasbt/python-machine-learning-book-2nd-edition | code/ch03/ch03.py | 1 | 18679 | # coding: utf-8
from sklearn import __version__ as sklearn_version
from distutils.version import LooseVersion
from sklearn import datasets
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from pydotplus import graph_from_dot_data
from sklearn.tree import export_graphviz
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
# *Python Machine Learning 2nd Edition* by [Sebastian Raschka](https://sebastianraschka.com), Packt Publishing Ltd. 2017
#
# Code Repository: https://github.com/rasbt/python-machine-learning-book-2nd-edition
#
# Code License: [MIT License](https://github.com/rasbt/python-machine-learning-book-2nd-edition/blob/master/LICENSE.txt)
# # Python Machine Learning - Code Examples
# # Chapter 3 - A Tour of Machine Learning Classifiers Using Scikit-Learn
# Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
if LooseVersion(sklearn_version) < LooseVersion('0.18'):
raise ValueError('Please use scikit-learn 0.18 or newer')
# *The use of `watermark` is optional. You can install this IPython extension via "`pip install watermark`". For more information, please see: https://github.com/rasbt/watermark.*
# ### Overview
# - [Choosing a classification algorithm](#Choosing-a-classification-algorithm)
# - [First steps with scikit-learn](#First-steps-with-scikit-learn)
# - [Training a perceptron via scikit-learn](#Training-a-perceptron-via-scikit-learn)
# - [Modeling class probabilities via logistic regression](#Modeling-class-probabilities-via-logistic-regression)
# - [Logistic regression intuition and conditional probabilities](#Logistic-regression-intuition-and-conditional-probabilities)
# - [Learning the weights of the logistic cost function](#Learning-the-weights-of-the-logistic-cost-function)
# - [Training a logistic regression model with scikit-learn](#Training-a-logistic-regression-model-with-scikit-learn)
# - [Tackling overfitting via regularization](#Tackling-overfitting-via-regularization)
# - [Maximum margin classification with support vector machines](#Maximum-margin-classification-with-support-vector-machines)
# - [Maximum margin intuition](#Maximum-margin-intuition)
# - [Dealing with the nonlinearly separable case using slack variables](#Dealing-with-the-nonlinearly-separable-case-using-slack-variables)
# - [Alternative implementations in scikit-learn](#Alternative-implementations-in-scikit-learn)
# - [Solving nonlinear problems using a kernel SVM](#Solving-nonlinear-problems-using-a-kernel-SVM)
# - [Using the kernel trick to find separating hyperplanes in higher dimensional space](#Using-the-kernel-trick-to-find-separating-hyperplanes-in-higher-dimensional-space)
# - [Decision tree learning](#Decision-tree-learning)
# - [Maximizing information gain – getting the most bang for the buck](#Maximizing-information-gain-–-getting-the-most-bang-for-the-buck)
# - [Building a decision tree](#Building-a-decision-tree)
# - [Combining weak to strong learners via random forests](#Combining-weak-to-strong-learners-via-random-forests)
# - [K-nearest neighbors – a lazy learning algorithm](#K-nearest-neighbors-–-a-lazy-learning-algorithm)
# - [Summary](#Summary)
# # Choosing a classification algorithm
# ...
# # First steps with scikit-learn
# Loading the Iris dataset from scikit-learn. Here, the third column represents the petal length, and the fourth column the petal width of the flower samples. The classes are already converted to integer labels where 0=Iris-Setosa, 1=Iris-Versicolor, 2=Iris-Virginica.
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
print('Class labels:', np.unique(y))
# Splitting data into 70% training and 30% test data:
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=1, stratify=y)
print('Labels counts in y:', np.bincount(y))
print('Labels counts in y_train:', np.bincount(y_train))
print('Labels counts in y_test:', np.bincount(y_test))
# Standardizing the features:
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
# ## Training a perceptron via scikit-learn
# Redefining the `plot_decision_regions` function from chapter 2:
ppn = Perceptron(n_iter=40, eta0=0.1, random_state=1)
ppn.fit(X_train_std, y_train)
# **Note**
#
# - You can replace `Perceptron(n_iter, ...)` by `Perceptron(max_iter, ...)` in scikit-learn >= 0.19. The `n_iter` parameter is used here deliberately, because some people still use scikit-learn 0.18.
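# A hedged sketch of the newer API (scikit-learn >= 0.19), where ``n_iter`` was renamed
# to ``max_iter``; defaults such as ``tol`` may differ between versions:
#
# ppn = Perceptron(max_iter=40, eta0=0.1, random_state=1)
# ppn.fit(X_train_std, y_train)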
y_pred = ppn.predict(X_test_std)
print('Misclassified samples: %d' % (y_test != y_pred).sum())
print('Accuracy: %.2f' % accuracy_score(y_test, y_pred))
print('Accuracy: %.2f' % ppn.score(X_test_std, y_test))
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0],
y=X[y == cl, 1],
alpha=0.8,
c=colors[idx],
marker=markers[idx],
label=cl,
edgecolor='black')
# highlight test samples
if test_idx:
# plot all samples
X_test, y_test = X[test_idx, :], y[test_idx]
plt.scatter(X_test[:, 0],
X_test[:, 1],
c='',
edgecolor='black',
alpha=1.0,
linewidth=1,
marker='o',
s=100,
label='test set')
# Training a perceptron model using the standardized training data:
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X=X_combined_std, y=y_combined,
classifier=ppn, test_idx=range(105, 150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('images/03_01.png', dpi=300)
plt.show()
# # Modeling class probabilities via logistic regression
# ...
# ### Logistic regression intuition and conditional probabilities
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
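# Quick check of the definition above: sigmoid(0) = 1 / (1 + exp(0)) = 0.5, and the output
# saturates toward 1 for large positive z and toward 0 for large negative z.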
z = np.arange(-7, 7, 0.1)
phi_z = sigmoid(z)
plt.plot(z, phi_z)
plt.axvline(0.0, color='k')
plt.ylim(-0.1, 1.1)
plt.xlabel('z')
plt.ylabel('$\phi (z)$')
# y axis ticks and gridline
plt.yticks([0.0, 0.5, 1.0])
ax = plt.gca()
ax.yaxis.grid(True)
plt.tight_layout()
#plt.savefig('images/03_02.png', dpi=300)
plt.show()
# ### Learning the weights of the logistic cost function
def cost_1(z):
return - np.log(sigmoid(z))
def cost_0(z):
return - np.log(1 - sigmoid(z))
z = np.arange(-10, 10, 0.1)
phi_z = sigmoid(z)
c1 = [cost_1(x) for x in z]
plt.plot(phi_z, c1, label='J(w) if y=1')
c0 = [cost_0(x) for x in z]
plt.plot(phi_z, c0, linestyle='--', label='J(w) if y=0')
plt.ylim(0.0, 5.1)
plt.xlim([0, 1])
plt.xlabel('$\phi$(z)')
plt.ylabel('J(w)')
plt.legend(loc='best')
plt.tight_layout()
#plt.savefig('images/03_04.png', dpi=300)
plt.show()
class LogisticRegressionGD(object):
"""Logistic Regression Classifier using gradient descent.
Parameters
------------
eta : float
Learning rate (between 0.0 and 1.0)
n_iter : int
Passes over the training dataset.
random_state : int
Random number generator seed for random weight
initialization.
Attributes
-----------
w_ : 1d-array
Weights after fitting.
cost_ : list
Logistic cost function value in each epoch.
"""
def __init__(self, eta=0.05, n_iter=100, random_state=1):
self.eta = eta
self.n_iter = n_iter
self.random_state = random_state
def fit(self, X, y):
""" Fit training data.
Parameters
----------
X : {array-like}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
rgen = np.random.RandomState(self.random_state)
self.w_ = rgen.normal(loc=0.0, scale=0.01, size=1 + X.shape[1])
self.cost_ = []
for i in range(self.n_iter):
net_input = self.net_input(X)
output = self.activation(net_input)
errors = (y - output)
self.w_[1:] += self.eta * X.T.dot(errors)
self.w_[0] += self.eta * errors.sum()
# note that we compute the logistic `cost` now
# instead of the sum of squared errors cost
cost = -y.dot(np.log(output)) - ((1 - y).dot(np.log(1 - output)))
self.cost_.append(cost)
return self
def net_input(self, X):
"""Calculate net input"""
return np.dot(X, self.w_[1:]) + self.w_[0]
def activation(self, z):
"""Compute logistic sigmoid activation"""
return 1. / (1. + np.exp(-np.clip(z, -250, 250)))
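# The np.clip above bounds z to [-250, 250] before exponentiation, avoiding overflow
# in np.exp for inputs with large magnitude.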
def predict(self, X):
"""Return class label after unit step"""
return np.where(self.net_input(X) >= 0.0, 1, 0)
# equivalent to:
# return np.where(self.activation(self.net_input(X)) >= 0.5, 1, 0)
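# The two predict variants agree because the logistic sigmoid is monotonic with
# sigmoid(0) = 0.5, so activation(net_input(X)) >= 0.5 exactly when net_input(X) >= 0.0.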
X_train_01_subset = X_train[(y_train == 0) | (y_train == 1)]
y_train_01_subset = y_train[(y_train == 0) | (y_train == 1)]
lrgd = LogisticRegressionGD(eta=0.05, n_iter=1000, random_state=1)
lrgd.fit(X_train_01_subset,
y_train_01_subset)
plot_decision_regions(X=X_train_01_subset,
y=y_train_01_subset,
classifier=lrgd)
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('images/03_05.png', dpi=300)
plt.show()
# ### Training a logistic regression model with scikit-learn
lr = LogisticRegression(C=100.0, random_state=1)
lr.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined,
classifier=lr, test_idx=range(105, 150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('images/03_06.png', dpi=300)
plt.show()
lr.predict_proba(X_test_std[:3, :])
lr.predict_proba(X_test_std[:3, :]).sum(axis=1)
lr.predict_proba(X_test_std[:3, :]).argmax(axis=1)
lr.predict(X_test_std[:3, :])
lr.predict(X_test_std[0, :].reshape(1, -1))
# ### Tackling overfitting via regularization
weights, params = [], []
for c in np.arange(-5, 5):
lr = LogisticRegression(C=10.**c, random_state=1)
lr.fit(X_train_std, y_train)
weights.append(lr.coef_[1])
params.append(10.**c)
weights = np.array(weights)
plt.plot(params, weights[:, 0],
label='petal length')
plt.plot(params, weights[:, 1], linestyle='--',
label='petal width')
plt.ylabel('weight coefficient')
plt.xlabel('C')
plt.legend(loc='upper left')
plt.xscale('log')
#plt.savefig('images/03_08.png', dpi=300)
plt.show()
# # Maximum margin classification with support vector machines
# ## Maximum margin intuition
# ...
# ## Dealing with the nonlinearly separable case using slack variables
svm = SVC(kernel='linear', C=1.0, random_state=1)
svm.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std,
y_combined,
classifier=svm,
test_idx=range(105, 150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('images/03_11.png', dpi=300)
plt.show()
# ## Alternative implementations in scikit-learn
ppn = SGDClassifier(loss='perceptron', n_iter=1000)
lr = SGDClassifier(loss='log', n_iter=1000)
svm = SGDClassifier(loss='hinge', n_iter=1000)
# **Note**
#
# - You can replace `Perceptron(n_iter, ...)` by `Perceptron(max_iter, ...)` in scikit-learn >= 0.19. The `n_iter` parameter is used here deliberately, because some people still use scikit-learn 0.18.
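# A hedged sketch of the same three models for scikit-learn >= 0.19, where ``n_iter``
# was replaced by ``max_iter`` (defaults such as ``tol`` may differ between versions):
#
# ppn = SGDClassifier(loss='perceptron', max_iter=1000)
# lr = SGDClassifier(loss='log', max_iter=1000)
# svm = SGDClassifier(loss='hinge', max_iter=1000)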
# # Solving non-linear problems using a kernel SVM
np.random.seed(1)
X_xor = np.random.randn(200, 2)
y_xor = np.logical_xor(X_xor[:, 0] > 0,
X_xor[:, 1] > 0)
y_xor = np.where(y_xor, 1, -1)
plt.scatter(X_xor[y_xor == 1, 0],
X_xor[y_xor == 1, 1],
c='b', marker='x',
label='1')
plt.scatter(X_xor[y_xor == -1, 0],
X_xor[y_xor == -1, 1],
c='r',
marker='s',
label='-1')
plt.xlim([-3, 3])
plt.ylim([-3, 3])
plt.legend(loc='best')
plt.tight_layout()
#plt.savefig('images/03_12.png', dpi=300)
plt.show()
# ## Using the kernel trick to find separating hyperplanes in higher dimensional space
svm = SVC(kernel='rbf', random_state=1, gamma=0.10, C=10.0)
svm.fit(X_xor, y_xor)
plot_decision_regions(X_xor, y_xor,
classifier=svm)
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('images/03_14.png', dpi=300)
plt.show()
svm = SVC(kernel='rbf', random_state=1, gamma=0.2, C=1.0)
svm.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined,
classifier=svm, test_idx=range(105, 150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('images/03_15.png', dpi=300)
plt.show()
svm = SVC(kernel='rbf', random_state=1, gamma=100.0, C=1.0)
svm.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined,
classifier=svm, test_idx=range(105, 150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('images/03_16.png', dpi=300)
plt.show()
# # Decision tree learning
# ## Maximizing information gain - getting the most bang for the buck
def gini(p):
return p * (1 - p) + (1 - p) * (1 - (1 - p))
def entropy(p):
return - p * np.log2(p) - (1 - p) * np.log2((1 - p))
def error(p):
return 1 - np.max([p, 1 - p])
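# Worked values of the three criteria above at p = 0.5:
# gini(0.5) = 0.5 * 0.5 + 0.5 * 0.5 = 0.5
# entropy(0.5) = -0.5 * log2(0.5) - 0.5 * log2(0.5) = 1.0
# error(0.5) = 1 - max(0.5, 0.5) = 0.5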
x = np.arange(0.0, 1.0, 0.01)
ent = [entropy(p) if p != 0 else None for p in x]
sc_ent = [e * 0.5 if e else None for e in ent]
err = [error(i) for i in x]
fig = plt.figure()
ax = plt.subplot(111)
for i, lab, ls, c, in zip([ent, sc_ent, gini(x), err],
['Entropy', 'Entropy (scaled)',
'Gini Impurity', 'Misclassification Error'],
['-', '-', '--', '-.'],
['black', 'lightgray', 'red', 'green', 'cyan']):
line = ax.plot(x, i, label=lab, linestyle=ls, lw=2, color=c)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15),
ncol=5, fancybox=True, shadow=False)
ax.axhline(y=0.5, linewidth=1, color='k', linestyle='--')
ax.axhline(y=1.0, linewidth=1, color='k', linestyle='--')
plt.ylim([0, 1.1])
plt.xlabel('p(i=1)')
plt.ylabel('Impurity Index')
#plt.savefig('images/03_19.png', dpi=300, bbox_inches='tight')
plt.show()
# ## Building a decision tree
tree = DecisionTreeClassifier(criterion='gini',
max_depth=4,
random_state=1)
tree.fit(X_train, y_train)
X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X_combined, y_combined,
classifier=tree, test_idx=range(105, 150))
plt.xlabel('petal length [cm]')
plt.ylabel('petal width [cm]')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('images/03_20.png', dpi=300)
plt.show()
dot_data = export_graphviz(tree,
filled=True,
rounded=True,
class_names=['Setosa',
'Versicolor',
'Virginica'],
feature_names=['petal length',
'petal width'],
out_file=None)
graph = graph_from_dot_data(dot_data)
graph.write_png('tree.png')
# ## Combining weak to strong learners via random forests
forest = RandomForestClassifier(criterion='gini',
n_estimators=25,
random_state=1,
n_jobs=2)
forest.fit(X_train, y_train)
plot_decision_regions(X_combined, y_combined,
classifier=forest, test_idx=range(105, 150))
plt.xlabel('petal length [cm]')
plt.ylabel('petal width [cm]')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('images/03_22.png', dpi=300)
plt.show()
# # K-nearest neighbors - a lazy learning algorithm
knn = KNeighborsClassifier(n_neighbors=5,
p=2,
metric='minkowski')
knn.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined,
classifier=knn, test_idx=range(105, 150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('images/03_24.png', dpi=300)
plt.show()
# # Summary
# ...
# ---
#
# Readers may ignore the next cell.
| mit |
sudikrt/costproML | testproject/test.py | 1 | 2773 | '''
Import Libs
'''
import pandas as pd
import numpy as np
from pandas.tools.plotting import scatter_matrix
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from math import sin, cos, sqrt, atan2, radians
def finddist(lat1, lon1, lat2, lon2):
# Haversine distance; lat/lon are expected in radians.
# Approximate Earth radius in km (assumed value; R was undefined in the original code).
R = 6373.0
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = R * c
return distance
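# Minimal usage sketch (not part of the original script; the coordinates are illustrative):
# the haversine formula above expects latitude/longitude in radians, so convert degrees first.
#
# lat1, lon1 = radians(52.2296756), radians(21.0122287)
# lat2, lon2 = radians(52.406374), radians(16.9251681)
# print finddist(lat1, lon1, lat2, lon2)  # roughly 278 km with R = 6373.0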
#Read Data
dataset = pd.read_csv("damnms.csv", dtype={'supplydemand':'int','cost':'int'})
#print shape
print dataset.shape
#description
print dataset.describe()
#class
print (dataset.groupby('job').size())
print dataset.columns
dataset['lat'] = dataset['lat'].apply(lambda x: str(x))
dataset['lng'] = dataset['lng'].apply(lambda x: str(x))
#dataset['id'] = dataset['id'].apply(pd.to_numeric)
dataset['id'] = dataset['id'].apply(lambda x: int(x))
dataset['cost'] = dataset['cost'].apply(lambda x: int(x))
print dataset.dtypes
'''
print dataset.describe()
columns = dataset.columns.tolist()
job="Tester"
radius=10
df_ = pd.DataFrame()
for index, row in dataset.iterrows():
if (row["job"] == job):
df_.append(np.array(row))
print df_
columns = dataset.columns.tolist()
columns = [c for c in columns if c not in ["job", "place", "cost"]]
target = "cost"
train = dataset.sample(frac = 0.8, random_state = 1)
test = dataset.loc[~dataset.index.isin(train.index)]
print(train.shape)
print(test.shape)
model = LinearRegression()
model.fit(train[columns], train[target])
predictions = model.predict(test[columns])
print test["cost"]
print predictions
print 'Error rate', mean_squared_error(predictions, test[target])
'''
array = dataset.values
X = array[:,5:8]
Y = array[:,8]
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)
knn = KNeighborsClassifier()
knn.fit(X_train, Y_train)
predictions = knn.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
| apache-2.0 |
jseabold/statsmodels | statsmodels/robust/tests/test_scale.py | 4 | 8652 | """
Test functions for models.robust.scale
"""
import numpy as np
from numpy.random import standard_normal
from numpy.testing import assert_almost_equal, assert_equal
import pytest
from scipy.stats import norm as Gaussian
import statsmodels.api as sm
import statsmodels.robust.scale as scale
from statsmodels.robust.scale import mad
# Example from Section 5.5, Venables & Ripley (2002)
DECIMAL = 4
# TODO: Can replicate these tests using stackloss data and R if this
# data is a problem
class TestChem(object):
@classmethod
def setup_class(cls):
cls.chem = np.array(
[
2.20,
2.20,
2.4,
2.4,
2.5,
2.7,
2.8,
2.9,
3.03,
3.03,
3.10,
3.37,
3.4,
3.4,
3.4,
3.5,
3.6,
3.7,
3.7,
3.7,
3.7,
3.77,
5.28,
28.95,
]
)
def test_mean(self):
assert_almost_equal(np.mean(self.chem), 4.2804, DECIMAL)
def test_median(self):
assert_almost_equal(np.median(self.chem), 3.385, DECIMAL)
def test_mad(self):
assert_almost_equal(scale.mad(self.chem), 0.52632, DECIMAL)
def test_iqr(self):
assert_almost_equal(scale.iqr(self.chem), 0.68570, DECIMAL)
def test_qn(self):
assert_almost_equal(scale.qn_scale(self.chem), 0.73231, DECIMAL)
def test_huber_scale(self):
assert_almost_equal(scale.huber(self.chem)[0], 3.20549, DECIMAL)
def test_huber_location(self):
assert_almost_equal(scale.huber(self.chem)[1], 0.67365, DECIMAL)
def test_huber_huberT(self):
n = scale.norms.HuberT()
n.t = 1.5
h = scale.Huber(norm=n)
assert_almost_equal(
scale.huber(self.chem)[0], h(self.chem)[0], DECIMAL
)
assert_almost_equal(
scale.huber(self.chem)[1], h(self.chem)[1], DECIMAL
)
def test_huber_Hampel(self):
hh = scale.Huber(norm=scale.norms.Hampel())
assert_almost_equal(hh(self.chem)[0], 3.17434, DECIMAL)
assert_almost_equal(hh(self.chem)[1], 0.66782, DECIMAL)
class TestMad(object):
@classmethod
def setup_class(cls):
np.random.seed(54321)
cls.X = standard_normal((40, 10))
def test_mad(self):
m = scale.mad(self.X)
assert_equal(m.shape, (10,))
def test_mad_empty(self):
empty = np.empty(0)
assert np.isnan(scale.mad(empty))
empty = np.empty((10, 100, 0))
assert_equal(scale.mad(empty, axis=1), np.empty((10, 0)))
empty = np.empty((100, 100, 0, 0))
assert_equal(scale.mad(empty, axis=-1), np.empty((100, 100, 0)))
def test_mad_center(self):
n = scale.mad(self.X, center=0)
assert_equal(n.shape, (10,))
with pytest.raises(TypeError):
scale.mad(self.X, center=None)
assert_almost_equal(
scale.mad(self.X, center=1),
np.median(np.abs(self.X - 1), axis=0) / Gaussian.ppf(3 / 4.0),
DECIMAL,
)
class TestMadAxes(object):
@classmethod
def setup_class(cls):
np.random.seed(54321)
cls.X = standard_normal((40, 10, 30))
def test_axis0(self):
m = scale.mad(self.X, axis=0)
assert_equal(m.shape, (10, 30))
def test_axis1(self):
m = scale.mad(self.X, axis=1)
assert_equal(m.shape, (40, 30))
def test_axis2(self):
m = scale.mad(self.X, axis=2)
assert_equal(m.shape, (40, 10))
def test_axisneg1(self):
m = scale.mad(self.X, axis=-1)
assert_equal(m.shape, (40, 10))
class TestIqr(object):
@classmethod
def setup_class(cls):
np.random.seed(54321)
cls.X = standard_normal((40, 10))
def test_iqr(self):
m = scale.iqr(self.X)
assert_equal(m.shape, (10,))
def test_iqr_empty(self):
empty = np.empty(0)
assert np.isnan(scale.iqr(empty))
empty = np.empty((10, 100, 0))
assert_equal(scale.iqr(empty, axis=1), np.empty((10, 0)))
empty = np.empty((100, 100, 0, 0))
assert_equal(scale.iqr(empty, axis=-1), np.empty((100, 100, 0)))
empty = np.empty(shape=())
with pytest.raises(ValueError):
scale.iqr(empty)
class TestIqrAxes(object):
@classmethod
def setup_class(cls):
np.random.seed(54321)
cls.X = standard_normal((40, 10, 30))
def test_axis0(self):
m = scale.iqr(self.X, axis=0)
assert_equal(m.shape, (10, 30))
def test_axis1(self):
m = scale.iqr(self.X, axis=1)
assert_equal(m.shape, (40, 30))
def test_axis2(self):
m = scale.iqr(self.X, axis=2)
assert_equal(m.shape, (40, 10))
def test_axisneg1(self):
m = scale.iqr(self.X, axis=-1)
assert_equal(m.shape, (40, 10))
class TestQn(object):
@classmethod
def setup_class(cls):
np.random.seed(54321)
cls.normal = standard_normal(size=40)
cls.range = np.arange(0, 40)
cls.exponential = np.random.exponential(size=40)
cls.stackloss = sm.datasets.stackloss.load_pandas().data
cls.sunspot = sm.datasets.sunspots.load_pandas().data.SUNACTIVITY
def test_qn_naive(self):
assert_almost_equal(
scale.qn_scale(self.normal), scale._qn_naive(self.normal), DECIMAL
)
assert_almost_equal(
scale.qn_scale(self.range), scale._qn_naive(self.range), DECIMAL
)
assert_almost_equal(
scale.qn_scale(self.exponential),
scale._qn_naive(self.exponential),
DECIMAL,
)
def test_qn_robustbase(self):
# from R's robustbase with finite.corr = FALSE
assert_almost_equal(scale.qn_scale(self.range), 13.3148, DECIMAL)
assert_almost_equal(
scale.qn_scale(self.stackloss),
np.array([8.87656, 8.87656, 2.21914, 4.43828]),
DECIMAL,
)
# sunspot.year from datasets in R only goes up to 289
assert_almost_equal(
scale.qn_scale(self.sunspot[0:289]), 33.50901, DECIMAL
)
def test_qn_empty(self):
empty = np.empty(0)
assert np.isnan(scale.qn_scale(empty))
empty = np.empty((10, 100, 0))
assert_equal(scale.qn_scale(empty, axis=1), np.empty((10, 0)))
empty = np.empty((100, 100, 0, 0))
assert_equal(scale.qn_scale(empty, axis=-1), np.empty((100, 100, 0)))
empty = np.empty(shape=())
with pytest.raises(ValueError):
scale.iqr(empty)
class TestQnAxes(object):
@classmethod
def setup_class(cls):
np.random.seed(54321)
cls.X = standard_normal((40, 10, 30))
def test_axis0(self):
m = scale.qn_scale(self.X, axis=0)
assert_equal(m.shape, (10, 30))
def test_axis1(self):
m = scale.qn_scale(self.X, axis=1)
assert_equal(m.shape, (40, 30))
def test_axis2(self):
m = scale.qn_scale(self.X, axis=2)
assert_equal(m.shape, (40, 10))
def test_axisneg1(self):
m = scale.qn_scale(self.X, axis=-1)
assert_equal(m.shape, (40, 10))
class TestHuber(object):
@classmethod
def setup_class(cls):
np.random.seed(54321)
cls.X = standard_normal((40, 10))
def test_huber_result_shape(self):
h = scale.Huber(maxiter=100)
m, s = h(self.X)
assert_equal(m.shape, (10,))
class TestHuberAxes(object):
@classmethod
def setup_class(cls):
np.random.seed(54321)
cls.X = standard_normal((40, 10, 30))
cls.h = scale.Huber(maxiter=1000, tol=1.0e-05)
def test_default(self):
m, s = self.h(self.X, axis=0)
assert_equal(m.shape, (10, 30))
def test_axis1(self):
m, s = self.h(self.X, axis=1)
assert_equal(m.shape, (40, 30))
def test_axis2(self):
m, s = self.h(self.X, axis=2)
assert_equal(m.shape, (40, 10))
def test_axisneg1(self):
m, s = self.h(self.X, axis=-1)
assert_equal(m.shape, (40, 10))
def test_mad_axis_none():
# GH 7027
a = np.array([[0, 1, 2], [2, 3, 2]])
def m(x):
return np.median(x)
direct = mad(a=a, axis=None)
custom = mad(a=a, axis=None, center=m)
axis0 = mad(a=a.ravel(), axis=0)
np.testing.assert_allclose(direct, custom)
np.testing.assert_allclose(direct, axis0)
| bsd-3-clause |
Tejas-Khot/deep-learning | test/utils.py | 8 | 2048 | '''
@author: Tejas Khot
@contact: [email protected]
@note: Utility functions for CIFAR dataset
'''
import gzip, cPickle
import os
import cPickle as pickle
import matplotlib.pyplot as plt
import numpy as np
def load_CIFAR_batch(filename):
"""
load single batch of CIFAR-10 dataset
@param filename: string of file name in CIFAR
@return: X, Y: data and labels of images in the CIFAR batch
"""
with open(filename, 'r') as f:
datadict=pickle.load(f)
X=datadict['data']
Y=datadict['labels']
X=X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype("float")
Y=np.array(Y)
return X, Y
def load_CIFAR10(ROOT):
"""
load entire CIFAR-10 dataset
@param ROOT: string of data folder
@return: X_train, Y_train: training data and labels
@return: X_test, Y_test: testing data and labels
"""
xs=[]
ys=[]
for b in range(1,6):
f=os.path.join(ROOT, "data_batch_%d" % (b, ))
X, Y=load_CIFAR_batch(f)
xs.append(X)
ys.append(Y)
X_train=np.concatenate(xs)
Y_train=np.concatenate(ys)
del X, Y
X_test, Y_test=load_CIFAR_batch(os.path.join(ROOT, "test_batch"))
return X_train, Y_train, X_test, Y_test
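# Hedged usage sketch (the directory name is an assumption, not defined in this module):
# X_train, Y_train, X_test, Y_test = load_CIFAR10("cifar-10-batches-py")
# X_train.shape == (50000, 32, 32, 3), X_test.shape == (10000, 32, 32, 3)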
def visualize_CIFAR(X_train,
Y_train,
samples_per_class):
"""
A visualization function for CIFAR
"""
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes=len(classes)
for y, cls in enumerate(classes):
idxs = np.flatnonzero(Y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show() | gpl-2.0 |
jereze/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 56 | 37976 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
err_msg='pred_%d doesnt match last pred_%d for loss %r and subsample %r. '
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multilcass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
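# The ``monitor`` callback used in the tests below is invoked once per
# boosting iteration as ``monitor(i, est, locals())``, after ``train_score_``
# for stage ``i`` has been filled in; returning True stops fitting early.
# The following is a hedged, illustrative sketch of a more realistic monitor
# (it is not used by any test in this file): it stops once the training loss
# has failed to improve for ``patience`` consecutive iterations.
def make_patience_monitor(patience=3):
    """Illustrative helper: build a monitor with a simple patience rule."""
    counter = [0]  # mutable counter captured by the closure

    def monitor(i, est, locals):
        # train_score_[i] is the training loss after stage i (lower is better)
        if i > 0 and est.train_score_[i] >= est.train_score_[i - 1]:
            counter[0] += 1
        else:
            counter[0] = 0
        return counter[0] >= patience

    return monitor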
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
| bsd-3-clause |
WideOpen/datawatch | build_page.py | 1 | 8434 | import sqlite3
import re
import argparse
import json
import datetime
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from dateutil.parser import parse
import tqdm
import GEOCacher
def combine_old_private(df_old, df_private):
df1 = df_old[["gse", "first_mentioned", "released"]]
df2 = df_private[["gse"]]
df2.loc[:, "first_mentioned"] = df_private["published_on"]
df2.loc[:, "released"] = [None] * (df_private.shape[0])
return pd.concat([df1, df2])
prepare_temp_tables = """
CREATE temp table first_mention as
select m.acc, p.paperid
from mentions m, papers p
where m.paperid = p.paperid and p.published_on =
(select min(published_on) from papers p, mentions m0 where p.paperid=m0.paperid and m0.acc = m.acc);
CREATE index temp.first_mention_acc_idx on first_mention(acc);
CREATE temp table gse_times AS
select ds.acc, ds.first_submitted_on as submitted, ds.first_public_on as released, p.published_on as first_mentioned, ds.title, m.paperid as first_paper
from datasets ds
left join first_mention m on m.acc = ds.acc
left join papers p on p.paperid = m.paperid where m.acc GLOB 'GSE*';
"""
def load_dataframes(maxlag):
print "Loading data..."
data_db = sqlite3.connect("data/odw.sqlite")
cache = GEOCacher.GEOCacher("data/cache.sqlite")
print "Preparing extra tables... ",
data_db.executescript(prepare_temp_tables)
print "done"
query_released = """
select acc as gse, doi, papers.title, submitted, first_mentioned, released, journal_nlm from gse_times, papers
where
papers.paperid = gse_times.first_paper
"""
df_released = pd.read_sql_query(query_released, data_db)
query_private = """select distinct acc as gse, published_on, journal_nlm as journal,
doi, title from mentions, papers
where gse not in (select acc from datasets) and mentions.paperid=papers.paperid
and mentions.acc GLOB 'GSE*'
order by published_on asc"""
df_missing = pd.read_sql_query(query_private, data_db)
skip_gses = map(lambda x: x.split()[0], open("whitelist.txt").readlines())
print "Double-checking missing GSE's using NCBI website..."
statuses_released = []
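# The loop below double-checks every "released" GSE that has no release date
# in GEOmetadb against the NCBI GEO website; entries that turn out to still
# be private are moved over to df_missing instead.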
for (i, gse) in enumerate(tqdm.tqdm(df_released.gse)):
if df_released.released[i] is None:
status = cache.check_gse_cached(gse, maxlag=maxlag)
statuses_released.append(False)
if status == "private": # append it to df_missing then
# Index([u'gse', u'published_on', u'journal', u'doi', u'title'], dtype='object')
df_missing = df_missing.append({"gse": gse, "published_on": df_released.first_mentioned[i], "journal": df_released.journal_nlm[i],
"doi": df_released.doi[i], "title": df_released.title[i]}, ignore_index=True)
print "Weird GSE: ", gse
else:
statuses_released.append(True)
nonreleased = np.nonzero(np.invert(np.array(statuses_released)))[0]
# print "Missing GSEs that are mentioned in GEOMetadb :",
# df_released.gse[nonreleased]
df_released = df_released.ix[np.nonzero(statuses_released)[0]]
today_str = str(datetime.date.today())
statuses = []
for (i, gse) in enumerate(tqdm.tqdm(df_missing.gse)):
if gse in skip_gses:
statuses.append("skip")
else:
status = cache.check_gse_cached(gse, maxlag=maxlag)
if status == "present":
data = cache.get_geo_page(gse, maxlag=99999)
reldate = re.search("Public on (.*)<", data)
if reldate is None:
print "Failed to extract date for ", gse
reldate = today_str
else:
reldate = reldate.group(1)
df_released = df_released.append({"doi": df_missing.gse[i], "gse": gse, "submitted" : None, "title": df_missing.title[i], "journal_nlm": df_missing.journal[
i], "first_mentioned": df_missing.published_on[i], "released": reldate}, ignore_index=True)
statuses.append(status)
df_private = df_missing.ix[np.array(statuses) == "private"]
df_private = df_private.sort_values("published_on")
cur = data_db.execute(
"select value from metadata where name = 'GEOmetadb timestamp'")
meta_timestamp = cur.fetchone()[0]
return df_private, df_released, meta_timestamp
def get_hidden_df(df):
df = df.copy()
oneday = datetime.timedelta(1)
timestep = datetime.timedelta(3)
x = []
y = []
c = datetime.date.today() - oneday
mentioned = np.array(map(lambda x: parse(x).date(), df.first_mentioned))
filldate = (datetime.datetime.today() + datetime.timedelta(1)).date()
public = np.array(map(lambda x: parse(x).date(),
df.released.fillna(str(filldate))))
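# Datasets that are still private have no release date; filling them with
# "tomorrow" keeps them counted as hidden up to and including today.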
while c >= datetime.date(2008, 1, 1):
mask1 = mentioned < c
mask2 = public > c + oneday
x.append(c)
y.append(np.count_nonzero(mask1 & mask2))
c -= timestep
print "Current overdue: ", y[0]
return pd.DataFrame({"date": x, "overdue": y})
def update_graph(dff):
sns.set_style("white")
sns.set_style("ticks")
sns.set_context("talk")
dff.ix[::10].plot("date", "overdue", figsize=(7, 4), lw=3)
onemonth = datetime.timedelta(30)
plt.xlim(dff.date.min(), dff.date.max()+onemonth)
plt.ylabel("Overdue dataset")
plt.xlabel("Date")
plt.savefig("docs/graph.png")
def gse2url(gse):
return "http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=" + gse
def doi2url(doi):
return "http://dx.doi.org/" + doi
def format_gse(gse):
return """<a href="%s">%s</a>""" % (gse2url(gse), gse)
def format_doi(doi):
return """<a href="%s">%s</a>""" % (doi2url(doi), doi)
tracking_script = """
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-93388605-1', 'auto');
ga('send', 'pageview');
</script>
"""
def update_html(df, metadb_timestamp):
pd.set_option('display.max_colwidth', -1)
table_html = df.to_html(formatters={
"doi": format_doi, "gse": format_gse}, escape=False, index=False, justify="left", classes="table table-striped table-bordered")
html_template_str = unicode(open("output_template.html").read())
n_overdue = df.shape[0]
final_html = html_template_str.format(date_updated=datetime.date.today(), metageo_timestamp=metadb_timestamp,
n_overdue=n_overdue, table_html=table_html, tracking_script=tracking_script)
with open("docs/index.html", "w") as f:
f.write(final_html.encode("utf-8"))
def prepare_data_json(df_private, meta_timestamp, update_date):
result = dict()
result["meta_timestamp"] = meta_timestamp
result["update_date"] = update_date
result["data"] = [row[1].to_dict() for row in df_private.iterrows()]
json.dump(result, open("private_geo.json", "w"))
def main():
parser = argparse.ArgumentParser(description='Build a page for datawatch')
parser.add_argument("--maxlag", default=7)
parser.add_argument("--output", default="")
args = parser.parse_args()
df_private, df_released, meta_timestamp = load_dataframes(args.maxlag)
combined_df = combine_old_private(df_released, df_private)
print "Currently missing entries in GEOMetadb: ", df_private.shape[0]
graph_df = get_hidden_df(combined_df)
if args.output != "":
combined_df.to_csv(args.output + "_combined.csv", encoding='utf-8')
df_released.to_csv(args.output + "_released.csv", encoding='utf-8')
graph_df.to_csv(args.output + "_graph.csv", encoding='utf-8')
prepare_data_json(df_private, meta_timestamp, str(datetime.date.today()))
update_html(df_private, meta_timestamp)
update_graph(graph_df)
if __name__ == "__main__":
main()
| mit |
choderalab/openmoltools | openmoltools/tests/test_openeye.py | 1 | 14775 | from nose.plugins.attrib import attr
import simtk.unit as u
from simtk.openmm import app
import simtk.openmm as mm
import numpy as np
import re
from mdtraj.testing import eq
from unittest import skipIf
from openmoltools import utils, packmol
import os
import openmoltools.openeye
import pandas as pd
import mdtraj as md
from numpy.testing import assert_raises
smiles_fails_with_strictStereo = "CN1CCN(CC1)CCCOc2cc3c(cc2OC)C(=[NH+]c4cc(c(cc4Cl)Cl)OC)C(=C=[N-])C=[NH+]3" # this is insane; C=C=[N-]?
smiles_fails_with_strictStereo = "CN1CCN(CC1)CCCOc2cc3c(cc2OC)C(=[NH+]c4cc(c(cc4Cl)Cl)OC)C(C#N)C=[NH+]3" # more sane version
try:
oechem = utils.import_("openeye.oechem")
if not oechem.OEChemIsLicensed(): raise(ImportError("Need License for OEChem!"))
oequacpac = utils.import_("openeye.oequacpac")
if not oequacpac.OEQuacPacIsLicensed(): raise(ImportError("Need License for oequacpac!"))
oeiupac = utils.import_("openeye.oeiupac")
if not oeiupac.OEIUPACIsLicensed(): raise(ImportError("Need License for OEIupac!"))
oeomega = utils.import_("openeye.oeomega")
if not oeomega.OEOmegaIsLicensed(): raise(ImportError("Need License for OEOmega!"))
HAVE_OE = True
openeye_exception_message = str()
except Exception as e:
HAVE_OE = False
openeye_exception_message = str(e)
try:
import parmed
HAVE_PARMED = True
except ImportError:
HAVE_PARMED = False
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.\n" + openeye_exception_message)
def test_butanol_keepconfs():
m0 = openmoltools.openeye.iupac_to_oemol("butanol")
m1 = openmoltools.openeye.get_charges(m0, keep_confs=1)
eq(m0.NumAtoms(), m1.NumAtoms())
assert m1.NumConfs() == 1, "This OEMol was created to have a single conformation."
assert m1.NumAtoms() == 15, "Butanol should have 15 atoms"
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.\n" + openeye_exception_message)
def test_butanol_unnormalized():
m0 = openmoltools.openeye.iupac_to_oemol("butanol")
m0.SetTitle("MyCustomTitle")
m1 = openmoltools.openeye.get_charges(m0, normalize=False, keep_confs=1)
eq(m0.NumAtoms(), m1.NumAtoms())
assert m1.NumConfs() == 1, "This OEMol was created to have a single conformation."
assert m1.NumAtoms() == 15, "Butanol should have 15 atoms"
assert m0.GetTitle() == m1.GetTitle(), "The title of the molecule should not be changed by normalization."
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_output_mol2():
molecule = openmoltools.openeye.iupac_to_oemol("cyclopentane")
openmoltools.openeye.molecule_to_mol2(molecule, tripos_mol2_filename="testing mol2 output.tripos.mol2")
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_output_mol2_standardize():
molecule = openmoltools.openeye.iupac_to_oemol("cyclopentane")
list(molecule.GetAtoms())[0].SetName("MyNameIsAtom")
openmoltools.openeye.molecule_to_mol2(molecule, tripos_mol2_filename="testing mol2 standardize output.tripos.mol2", standardize=True)
with open("testing mol2 standardize output.tripos.mol2", "r") as outfile:
text = outfile.read()
# This should not find the text we added, to make sure the molecule is standardized.
assert re.search("MyNameIsAtom", text) is None
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_output_mol2_no_standardize():
molecule = openmoltools.openeye.iupac_to_oemol("cyclopentane")
list(molecule.GetAtoms())[0].SetName("MyNameIsAtom")
openmoltools.openeye.molecule_to_mol2(molecule, tripos_mol2_filename="testing mol2 nostandardize output.tripos.mol2", standardize=False)
with open("testing mol2 nostandardize output.tripos.mol2", "r") as outfile:
text = outfile.read()
# This should find the text we added, to make sure the molecule is not standardized.
assert re.search("MyNameIsAtom", text) is not None
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_output_mol2_multiple_confs():
molecule = openmoltools.openeye.iupac_to_oemol("butanol")
multiple_conformers = openmoltools.openeye.generate_conformers(molecule)
openmoltools.openeye.molecule_to_mol2(multiple_conformers, tripos_mol2_filename="testing mol2 multiple conformers.tripos.mol2", conformer=None)
with open("testing mol2 multiple conformers.tripos.mol2", "r") as outfile:
text = outfile.read()
# This should find more than one conformation
assert text.count("@<TRIPOS>MOLECULE") > 1
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_butanol():
m0 = openmoltools.openeye.iupac_to_oemol("butanol")
m1 = openmoltools.openeye.get_charges(m0, keep_confs=-1)
eq(m0.NumAtoms(), m1.NumAtoms())
assert m1.NumConfs() >= 2, "Butanol should have multiple conformers."
assert m1.NumAtoms() == 15, "Butanol should have 15 atoms"
all_data = {}
for k, molecule in enumerate(m1.GetConfs()):
names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(molecule)
all_data[k] = names_to_charges
eq(sum(names_to_charges.values()), 0.0, decimal=7) # Net charge should be zero
# Build a table of charges indexed by conformer number and atom name
all_data = pd.DataFrame(all_data)
# The standard deviation along the conformer axis should be zero if all conformers have same charges
eq(all_data.std(1).values, np.zeros(m1.NumAtoms()), decimal=7)
with utils.enter_temp_directory():
# Try saving to disk as mol2
openmoltools.openeye.molecule_to_mol2(m1, "out.mol2")
# Make sure MDTraj can read the output
t = md.load("out.mol2")
# Make sure MDTraj can read the charges / topology info
atoms, bonds = md.formats.mol2.mol2_to_dataframes("out.mol2")
# Finally, make sure MDTraj and OpenEye report the same charges.
names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(m1)
q = atoms.set_index("name").charge
q0 = pd.Series(names_to_charges)
delta = q - q0 # An object containing the charges, with atom names as indices
eq(delta.values, np.zeros_like(delta.values), decimal=4)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_benzene():
m0 = openmoltools.openeye.iupac_to_oemol("benzene")
m1 = openmoltools.openeye.get_charges(m0)
eq(m0.NumAtoms(), m1.NumAtoms())
print(m1.NumConfs())
assert m1.NumConfs() == 1, "Benezene should have 1 conformer"
assert m1.NumAtoms() == 12, "Benezene should have 12 atoms"
names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(m1)
eq(sum(names_to_charges.values()), 0.0, decimal=7) # Net charge should be zero
with utils.enter_temp_directory():
# Try saving to disk as mol2
openmoltools.openeye.molecule_to_mol2(m1, "out.mol2")
# Make sure MDTraj can read the output
t = md.load("out.mol2")
# Make sure MDTraj can read the charges / topology info
atoms, bonds = md.formats.mol2.mol2_to_dataframes("out.mol2")
# Finally, make sure MDTraj and OpenEye report the same charges.
names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(m1)
q = atoms.set_index("name").charge
q0 = pd.Series(names_to_charges)
delta = q - q0 # An object containing the charges, with atom names as indices
eq(delta.values, np.zeros_like(delta.values), decimal=4)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_link_in_utils():
m0 = openmoltools.openeye.iupac_to_oemol("benzene")
m1 = openmoltools.openeye.get_charges(m0)
with utils.enter_temp_directory():
# This function was moved from utils to openeye, so check that the old link still works.
utils.molecule_to_mol2(m1, "out.mol2")
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_smiles():
m0 = openmoltools.openeye.smiles_to_oemol("CCCCO")
charged0 = openmoltools.openeye.get_charges(m0)
m1 = openmoltools.openeye.iupac_to_oemol("butanol")
charged1 = openmoltools.openeye.get_charges(m1)
eq(charged0.NumAtoms(), charged1.NumAtoms())
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_ffxml():
with utils.enter_temp_directory():
m0 = openmoltools.openeye.smiles_to_oemol("CCCCO")
charged0 = openmoltools.openeye.get_charges(m0)
m1 = openmoltools.openeye.smiles_to_oemol("ClC(Cl)(Cl)Cl")
charged1 = openmoltools.openeye.get_charges(m1)
trajectories, ffxml = openmoltools.openeye.oemols_to_ffxml([charged0, charged1])
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_ffxml_simulation():
"""Test converting toluene and benzene smiles to oemol to ffxml to openmm simulation."""
with utils.enter_temp_directory():
m0 = openmoltools.openeye.smiles_to_oemol("Cc1ccccc1")
charged0 = openmoltools.openeye.get_charges(m0)
m1 = openmoltools.openeye.smiles_to_oemol("c1ccccc1")
charged1 = openmoltools.openeye.get_charges(m1)
ligands = [charged0, charged1]
n_atoms = [15,12]
trajectories, ffxml = openmoltools.openeye.oemols_to_ffxml(ligands)
eq(len(trajectories),len(ligands))
pdb_filename = utils.get_data_filename("chemicals/proteins/1vii.pdb")
temperature = 300 * u.kelvin
friction = 0.3 / u.picosecond
timestep = 0.01 * u.femtosecond
protein_traj = md.load(pdb_filename)
protein_traj.center_coordinates()
protein_top = protein_traj.top.to_openmm()
protein_xyz = protein_traj.openmm_positions(0)
for k, ligand in enumerate(ligands):
ligand_traj = trajectories[k]
ligand_traj.center_coordinates()
eq(ligand_traj.n_atoms, n_atoms[k])
eq(ligand_traj.n_frames, 1)
#Move the pre-centered ligand sufficiently far away from the protein to avoid a clash.
min_atom_pair_distance = ((ligand_traj.xyz[0] ** 2.).sum(1) ** 0.5).max() + ((protein_traj.xyz[0] ** 2.).sum(1) ** 0.5).max() + 0.3
ligand_traj.xyz += np.array([1.0, 0.0, 0.0]) * min_atom_pair_distance
ligand_xyz = ligand_traj.openmm_positions(0)
ligand_top = ligand_traj.top.to_openmm()
ffxml.seek(0)
forcefield = app.ForceField("amber10.xml", ffxml, "tip3p.xml")
model = app.modeller.Modeller(protein_top, protein_xyz)
model.add(ligand_top, ligand_xyz)
model.addSolvent(forcefield, padding=0.4 * u.nanometer)
system = forcefield.createSystem(model.topology, nonbondedMethod=app.PME, nonbondedCutoff=1.0 * u.nanometers, constraints=app.HAngles)
integrator = mm.LangevinIntegrator(temperature, friction, timestep)
simulation = app.Simulation(model.topology, system, integrator)
simulation.context.setPositions(model.positions)
print("running")
simulation.step(1)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_charge_fail1():
with assert_raises(RuntimeError):
with utils.enter_temp_directory():
openmoltools.openeye.smiles_to_antechamber(smiles_fails_with_strictStereo, "test.mol2", "test.frcmod", strictStereo=True)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_charge_fail2():
with assert_raises(RuntimeError):
m = openmoltools.openeye.smiles_to_oemol(smiles_fails_with_strictStereo)
m = openmoltools.openeye.get_charges(m, strictStereo=True, keep_confs=1)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_charge_success1():
with utils.enter_temp_directory():
openmoltools.openeye.smiles_to_antechamber(smiles_fails_with_strictStereo, "test.mol2", "test.frcmod", strictStereo=False)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_charge_success2():
m = openmoltools.openeye.smiles_to_oemol(smiles_fails_with_strictStereo)
m = openmoltools.openeye.get_charges(m, strictStereo=False)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_oeassigncharges_fail():
with assert_raises(RuntimeError):
# Fail test for OEToolkits (2017.2.1) new charging function
m = openmoltools.openeye.smiles_to_oemol(smiles_fails_with_strictStereo)
m = openmoltools.openeye.get_charges(m, strictStereo=True, legacy=False)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_oeassigncharges_success():
# Success test for OEToolkits (2017.2.1) new charging function
m = openmoltools.openeye.iupac_to_oemol("butanol")
m = openmoltools.openeye.get_charges(m, legacy=False)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
@skipIf(not HAVE_PARMED, "Cannot test without Parmed Chemistry.")
@skipIf(packmol.PACKMOL_PATH is None, "Skipping testing of packmol conversion because packmol not found.")
@attr("parmed")
def test_binary_mixture_rename():
smiles_string0 = "CCCCCC"
smiles_string1 = "CCCCCCCCC"
with utils.enter_temp_directory(): # Prevents creating tons of GAFF files everywhere.
mol2_filename0 = "./A.mol2"
frcmod_filename0 = "./A.frcmod"
mol2_filename1 = "./B.mol2"
frcmod_filename1 = "./B.frcmod"
gaff_mol2_filenames = [mol2_filename0, mol2_filename1]
frcmod_filenames = [frcmod_filename0, frcmod_filename1]
prmtop_filename = "./box.prmtop"
inpcrd_filename = "./box.inpcrd"
openmoltools.openeye.smiles_to_antechamber(smiles_string0, mol2_filename0, frcmod_filename0)
openmoltools.openeye.smiles_to_antechamber(smiles_string1, mol2_filename1, frcmod_filename1)
openmoltools.utils.randomize_mol2_residue_names(gaff_mol2_filenames)
box_pdb_filename = "./box.pdb"
gaff_mol2_filenames = [mol2_filename0, mol2_filename1]
n_monomers = [10, 20]
packed_trj = packmol.pack_box([md.load(mol2) for mol2 in gaff_mol2_filenames], n_monomers)
packed_trj.save(box_pdb_filename)
tleap_cmd = openmoltools.amber.build_mixture_prmtop(gaff_mol2_filenames, frcmod_filenames, box_pdb_filename, prmtop_filename, inpcrd_filename)
prmtop = app.AmberPrmtopFile(prmtop_filename)
inpcrd = app.AmberInpcrdFile(inpcrd_filename)
system = prmtop.createSystem(nonbondedMethod=app.PME, nonbondedCutoff=1.0*u.nanometers, constraints=app.HBonds)
| mit |
thunderhoser/GewitterGefahr | gewittergefahr/scripts/evaluate_storm_tracks.py | 1 | 5660 | """Evaluates a set of storm tracks."""
import argparse
import pandas
from gewittergefahr.gg_io import storm_tracking_io as tracking_io
from gewittergefahr.gg_utils import storm_tracking_utils as tracking_utils
from gewittergefahr.gg_utils import storm_tracking_eval as tracking_eval
from gewittergefahr.gg_utils import echo_top_tracking
from gewittergefahr.gg_utils import time_conversion
MINOR_SEPARATOR_STRING = '\n\n' + '-' * 50 + '\n\n'
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
STORM_OBJECT_COLUMNS = [
tracking_utils.PRIMARY_ID_COLUMN, tracking_utils.FULL_ID_COLUMN,
tracking_utils.VALID_TIME_COLUMN, tracking_utils.SPC_DATE_COLUMN,
tracking_utils.CENTROID_LATITUDE_COLUMN,
tracking_utils.CENTROID_LONGITUDE_COLUMN,
tracking_utils.ROWS_IN_STORM_COLUMN, tracking_utils.COLUMNS_IN_STORM_COLUMN
]
TRACKING_DIR_ARG_NAME = 'input_tracking_dir_name'
FIRST_SPC_DATE_ARG_NAME = 'first_spc_date_string'
LAST_SPC_DATE_ARG_NAME = 'last_spc_date_string'
MYRORSS_DIR_ARG_NAME = 'input_myrorss_dir_name'
RADAR_FIELD_ARG_NAME = 'radar_field_name'
OUTPUT_FILE_ARG_NAME = 'output_file_name'
TRACKING_DIR_HELP_STRING = (
'Name of top-level directory with tracks to evaluate. Files therein will '
'be found by `storm_tracking_io.find_processed_file` and read '
'by `storm_tracking_io.read_processed_file`.')
SPC_DATE_HELP_STRING = (
'SPC date (format "yyyymmdd"). Evaluation will be done for all SPC dates '
'in the period `{0:s}`...`{1:s}`.'
).format(FIRST_SPC_DATE_ARG_NAME, LAST_SPC_DATE_ARG_NAME)
MYRORSS_DIR_HELP_STRING = (
'Name of top-level directory with radar files. Files therein will be found'
' by `myrorss_and_mrms_io.find_raw_file` and read by '
'`myrorss_and_mrms_io.read_data_from_sparse_grid_file`.')
RADAR_FIELD_HELP_STRING = (
'Name of field used to compute mismatch error. Must be accepted by '
'`radar_utils.check_field_name`. Must not be a single-height reflectivity '
'field.')
OUTPUT_FILE_HELP_STRING = (
'Path to output file. Will be written by `storm_tracking_eval.write_file`.'
)
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + TRACKING_DIR_ARG_NAME, type=str, required=True,
help=TRACKING_DIR_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + FIRST_SPC_DATE_ARG_NAME, type=str, required=True,
help=SPC_DATE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + LAST_SPC_DATE_ARG_NAME, type=str, required=True,
help=SPC_DATE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + MYRORSS_DIR_ARG_NAME, type=str, required=True,
help=MYRORSS_DIR_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + RADAR_FIELD_ARG_NAME, type=str, required=True,
help=RADAR_FIELD_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_FILE_ARG_NAME, type=str, required=True,
help=OUTPUT_FILE_HELP_STRING)
def _run(top_tracking_dir_name, first_spc_date_string, last_spc_date_string,
top_myrorss_dir_name, radar_field_name, output_file_name):
"""Evaluates a set of storm tracks.
This is effectively the main method.
:param top_tracking_dir_name: See documentation at top of file.
:param first_spc_date_string: Same.
:param last_spc_date_string: Same.
:param top_myrorss_dir_name: Same.
:param radar_field_name: Same.
:param output_file_name: Same.
"""
spc_date_strings = time_conversion.get_spc_dates_in_range(
first_spc_date_string=first_spc_date_string,
last_spc_date_string=last_spc_date_string)
list_of_storm_object_tables = []
for this_spc_date_string in spc_date_strings:
these_file_names = tracking_io.find_files_one_spc_date(
top_tracking_dir_name=top_tracking_dir_name,
tracking_scale_metres2=
echo_top_tracking.DUMMY_TRACKING_SCALE_METRES2,
source_name=tracking_utils.SEGMOTION_NAME,
spc_date_string=this_spc_date_string, raise_error_if_missing=False
)[0]
if len(these_file_names) == 0:
continue
this_storm_object_table = tracking_io.read_many_files(
these_file_names
)[STORM_OBJECT_COLUMNS]
list_of_storm_object_tables.append(this_storm_object_table)
if this_spc_date_string != spc_date_strings[-1]:
print(MINOR_SEPARATOR_STRING)
if len(list_of_storm_object_tables) == 1:
continue
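# Align the newest table's columns with the first one so that the
# concatenation below stacks identical columns in the same order.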
list_of_storm_object_tables[-1] = list_of_storm_object_tables[-1].align(
list_of_storm_object_tables[0], axis=1
)[0]
print(SEPARATOR_STRING)
storm_object_table = pandas.concat(
list_of_storm_object_tables, axis=0, ignore_index=True)
evaluation_dict = tracking_eval.evaluate_tracks(
storm_object_table=storm_object_table,
top_myrorss_dir_name=top_myrorss_dir_name,
radar_field_name=radar_field_name)
print('Writing results to: "{0:s}"...'.format(output_file_name))
tracking_eval.write_file(evaluation_dict=evaluation_dict,
pickle_file_name=output_file_name)
if __name__ == '__main__':
INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
_run(
top_tracking_dir_name=getattr(INPUT_ARG_OBJECT, TRACKING_DIR_ARG_NAME),
first_spc_date_string=getattr(
INPUT_ARG_OBJECT, FIRST_SPC_DATE_ARG_NAME),
last_spc_date_string=getattr(INPUT_ARG_OBJECT, LAST_SPC_DATE_ARG_NAME),
top_myrorss_dir_name=getattr(INPUT_ARG_OBJECT, MYRORSS_DIR_ARG_NAME),
radar_field_name=getattr(INPUT_ARG_OBJECT, RADAR_FIELD_ARG_NAME),
output_file_name=getattr(INPUT_ARG_OBJECT, OUTPUT_FILE_ARG_NAME)
)
| mit |
maxwell-lv/MyQuant | myquant.py | 1 | 7210 | from zipline.data.bundles import register
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
from zipline.api import (
schedule_function,
symbol,
order_target_percent,
date_rules,
record
)
import re
from zipline.algorithm import TradingAlgorithm
from zipline.finance.trading import TradingEnvironment
from zipline.utils.calendars import get_calendar, register_calendar
from zipline.finance import trading
from zipline.utils.factory import create_simulation_parameters
from zipline.data.bundles.core import load
from zipline.data.data_portal import DataPortal
from zipline.api import order_target, record, symbol, order_target_percent, order_percent, attach_pipeline, pipeline_output
from loader import load_market_data
from cn_stock_holidays.zipline.default_calendar import shsz_calendar
from zipline.data.bundles.maxdl import maxdl_bundle
from zipline.finance.commission import PerDollar
from zipline.finance.blotter import Blotter
from zipline.finance.slippage import FixedSlippage
from zipline.pipeline.factors import RSI, SimpleMovingAverage, BollingerBands
from zipline.pipeline import Pipeline
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.loaders import USEquityPricingLoader
bundle = 'maxdl'
start_session_str = '2010-01-18'
end_session_str = '2016-12-30'
register(
bundle,
maxdl_bundle,
"SHSZ",
pd.Timestamp(start_session_str, tz='utc'),
pd.Timestamp(end_session_str, tz='utc')
)
bundle_data = load(bundle, os.environ, None,)
pipeline_loader = USEquityPricingLoader(bundle_data.equity_daily_bar_reader, bundle_data.adjustment_reader)
def choose_loader(column):
if column in USEquityPricing.columns:
return pipeline_loader
raise ValueError(
"No PipelineLoader registered for column %s." % column
)
prefix, connstr = re.split(
r'sqlite:///',
str(bundle_data.asset_finder.engine.url),
maxsplit=1,
)
env = trading.environment = TradingEnvironment(asset_db_path=connstr,
trading_calendar=shsz_calendar,
bm_symbol='000001.SS',
load=load_market_data)
first_trading_day = bundle_data.equity_daily_bar_reader.first_trading_day
data = DataPortal(
env.asset_finder, shsz_calendar,
first_trading_day=first_trading_day,
# equity_minute_reader=bundle_data.equity_minute_bar_reader,
equity_daily_reader=bundle_data.equity_daily_bar_reader,
adjustment_reader=bundle_data.adjustment_reader,
)
def initialize(context):
context.i = 0
context.full = False
context.state = 0
bb = BollingerBands(window_length=42, k=2)
lower = bb.lower
upper = bb.upper
# sma = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10)
pipe = Pipeline(
columns={
'lower':lower,
'upper':upper
}
)
# pipe = Pipeline(
# columns={
# '10_day':sma
# }
# )
attach_pipeline(pipe, 'my_pipeline')
#schedule_function(handle_daily_data, date_rules.every_day())
def before_trading_start(context, data):
context.output = pipeline_output('my_pipeline')
def handle_daily_data(context, data):
sym = symbol('600418.SS')
pipe = pipeline_output('my_pipeline')
price = data.current(sym, 'close')
upper = pipe['upper'][sym]
lower = pipe['lower'][sym]
if price == 0.0:
print(price)
return
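# Simple Bollinger-band state machine:
#   state 0 -- watching; price below the lower band arms a buy (state 1),
#              price above the upper band arms a sell (state 2);
#   state 1 -- go fully long once price closes back above the lower band;
#   state 2 -- flatten the position once price falls back inside the bands.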
if context.state == 0:
if price < lower:
context.state = 1
elif price > upper:
context.state = 2
elif context.state == 1:
if price > lower:
order_target_percent(sym, 1.0)
context.state = 0
elif context.state == 2:
if lower < price < upper:
order_target_percent(sym, 0)
context.state = 0
record(jhqc = data.current(sym, 'price'),
upper = pipe['upper'][sym],
lower = pipe['lower'][sym]
)
# print(data.current(symbol('002337.SZ'), 'open'))
# Skip first 300 days to get full windows
# context.i += 1
# if context.i < context.window_length:
# return
# Compute averages
# history() has to be called with the same params
# from above and returns a pandas dataframe.
# short_mavg = data.history(sym, 'price', 100, '1d').mean()
# long_mavg = data.history(sym, 'price', 300, '1d').mean()
# Trading logic
# if short_mavg > long_mavg:
# order_target orders as many shares as needed to
# achieve the desired number of shares.
# if not context.full:
# order_target_percent(sym, 0.9)
# context.full = True
#order_target_percent(sym, 1)
# context.order_percent(sym, 1)
# elif short_mavg < long_mavg:
# if context.full:
# order_target_percent(sym, 0)
# context.full = False
#order_target_percent(sym, 0)
# context.order_percent(sym, 0)
# Save values for later inspection
# record(sxkj=data[sym].price,
# short_mavg=short_mavg,
# long_mavg=long_mavg)
def analyse(context, perf):
fig = plt.figure()
ax1 = fig.add_subplot(211)
perf.portfolio_value.plot(ax=ax1)
ax1.set_ylabel('portfolio value in ¥')
ax2 = fig.add_subplot(212)
perf['jhqc'].plot(ax=ax2)
perf[['upper', 'lower']].plot(ax=ax2)
perf_trans = perf.ix[[t != [] for t in perf.transactions]]
buys = perf_trans.ix[[t[0]['amount'] > 0 for t in perf_trans.transactions]]
sells = perf_trans.ix[[t[0]['amount'] < 0 for t in perf_trans.transactions]]
ax2.plot(buys.index, perf.lower.ix[buys.index],
'^', markersize=10, color='m')
ax2.plot(sells.index, perf.upper.ix[sells.index],
'v', markersize=10, color='k')
ax2.set_ylabel('price in ¥')
plt.legend(loc=0)
plt.show()
if __name__ == "__main__":
data_frequency = "daily"
sim_params = create_simulation_parameters(
start=pd.to_datetime("2014-01-05 00:00:00").tz_localize("Asia/Shanghai"),
end=pd.to_datetime("2016-12-30 00:00:00").tz_localize("Asia/Shanghai"),
data_frequency=data_frequency, emission_rate="daily", trading_calendar=shsz_calendar)
blotter = Blotter(data_frequency = data_frequency,
asset_finder=env.asset_finder,
slippage_func=FixedSlippage(),
commission = PerDollar(cost=0.00025))
perf = TradingAlgorithm(initialize=initialize,
handle_data=handle_daily_data,
sim_params=sim_params,
env=trading.environment,
trading_calendar=shsz_calendar,
get_pipeline_loader = choose_loader,
before_trading_start=before_trading_start,
analyze=analyse,
blotter=blotter
).run(data, overwrite_sim_params=False)
perf.to_pickle('d:\\temp\\output.pickle')
| gpl-3.0 |
fzalkow/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
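# a, b and c are linear combinations of x and y plus a small-variance z, so
# the point cloud is nearly flat along one direction -- exactly the situation
# PCA is meant to reveal; `density` is only used to color the scatter points.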
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
nikitasingh981/scikit-learn | examples/applications/plot_stock_market.py | 76 | 8522 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connection. For each
symbol, the symbols that it is connected too are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models are combined in a 2D graph where nodes
represents the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenge
is to position the labels minimizing overlap. For this we use an
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
try:
from matplotlib.finance import quotes_historical_yahoo_ochl
except ImportError:
# quotes_historical_yahoo_ochl was named quotes_historical_yahoo before matplotlib 1.4
from matplotlib.finance import quotes_historical_yahoo as quotes_historical_yahoo_ochl
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [quotes_historical_yahoo_ochl(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
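# Rescaling the precision matrix by the inverse square root of its diagonal
# gives (up to sign) the partial correlations; only the strongest conditional
# dependencies (absolute value above 0.02) are kept for display.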
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
ibis-project/ibis | ibis/backends/dask/execution/indexing.py | 1 | 1869 | """Execution rules for ops.Where operations"""
import dask.dataframe as dd
import numpy as np
import ibis.expr.operations as ops
from ibis.backends.pandas.core import boolean_types, scalar_types
from ibis.backends.pandas.execution.generic import (
execute_node_where_scalar_scalar_scalar,
execute_node_where_series_series_series,
)
from ..dispatch import execute_node
from .util import TypeRegistrationDict, register_types_to_dispatcher
DASK_DISPATCH_TYPES: TypeRegistrationDict = {
ops.Where: [
(
(dd.Series, dd.Series, dd.Series),
execute_node_where_series_series_series,
),
(
(dd.Series, dd.Series, scalar_types),
execute_node_where_series_series_series,
),
(
(boolean_types, dd.Series, dd.Series,),
execute_node_where_scalar_scalar_scalar,
),
]
}
register_types_to_dispatcher(execute_node, DASK_DISPATCH_TYPES)
def execute_node_where_series_scalar_scalar(op, cond, true, false, **kwargs):
return dd.from_array(np.repeat(true, len(cond))).where(cond, other=false)
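# The dispatcher needs one concrete registration per scalar type, so the loop
# below registers the (Series, scalar, scalar) case for every supported type.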
for scalar_type in scalar_types:
execute_node.register(ops.Where, dd.Series, scalar_type, scalar_type)(
execute_node_where_series_scalar_scalar
)
@execute_node.register(ops.Where, boolean_types, dd.Series, scalar_types)
def execute_node_where_scalar_series_scalar(op, cond, true, false, **kwargs):
if cond:
return true
else:
# TODO double check this is the right way to do this
out = dd.from_array(np.repeat(false, len(true)))
out.index = true.index
return out
@execute_node.register(ops.Where, boolean_types, scalar_types, dd.Series)
def execute_node_where_scalar_scalar_series(op, cond, true, false, **kwargs):
return dd.from_array(np.repeat(true, len(false))) if cond else false
| apache-2.0 |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/manifold/isomap.py | 5 | 7136 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically
'FW' : Floyd-Warshall algorithm
'D' : Dijkstra algorithm with Fibonacci Heaps
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
[1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
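# The isomap kernel is K = -0.5 * D**2; the double centering required by
# classical MDS is handled inside KernelPCA (kernel="precomputed").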
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
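# Minimal usage sketch (added for illustration, not part of the original
# module). It assumes this class is the one exposed as sklearn.manifold.Isomap
# and only exercises the public fit_transform/transform API documented above.
if __name__ == "__main__":
    from sklearn.datasets import make_s_curve
    from sklearn.manifold import Isomap
    X, _ = make_s_curve(n_samples=300, random_state=0)
    iso = Isomap(n_neighbors=10, n_components=2)
    X_2d = iso.fit_transform(X)        # embed the training data
    X_new_2d = iso.transform(X[:5])    # out-of-sample points via the geodesic kernel
    print(X_2d.shape, X_new_2d.shape)  # (300, 2) (5, 2)
    print("reconstruction error:", iso.reconstruction_error())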
| bsd-3-clause |
ClinicalGraphics/scikit-image | doc/examples/filters/plot_entropy.py | 6 | 2233 | """
=======
Entropy
=======
In information theory, information entropy is the log-base-2 of the number of
possible outcomes for a message.
For an image, local entropy is related to the complexity contained in a given
neighborhood, typically defined by a structuring element. The entropy filter can
detect subtle variations in the local gray level distribution.
In the first example, the image is composed of two surfaces with two slightly
different distributions. The image has a uniform random distribution in the
range [-15, +15] in the middle of the image and a uniform random distribution in
the range [-14, +14] at the image borders, both centered at a gray value of 128.
To detect the central square, we compute the local entropy measure using a
circular structuring element of a radius big enough to capture the local gray
level distribution. The second example shows how to detect texture in the camera
image using a smaller structuring element.
"""
import matplotlib.pyplot as plt
import numpy as np
from skimage import data
from skimage.util import img_as_ubyte
from skimage.filters.rank import entropy
from skimage.morphology import disk
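# Quick numerical illustration (added; not in the original example): for a
# uniform distribution over 8 equally likely gray levels the Shannon entropy is
# log2(8) = 3 bits, matching the "log-base-2 of the number of possible
# outcomes" description in the docstring above.
p_uniform = np.full(8, 1.0 / 8)
print("entropy of 8 equally likely outcomes:",
      -np.sum(p_uniform * np.log2(p_uniform)))  # -> 3.0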
# First example: object detection.
noise_mask = 28 * np.ones((128, 128), dtype=np.uint8)
noise_mask[32:-32, 32:-32] = 30
noise = (noise_mask * np.random.random(noise_mask.shape) - 0.5 *
noise_mask).astype(np.uint8)
img = noise + 128
entr_img = entropy(img, disk(10))
fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(8, 3))
ax0.imshow(noise_mask, cmap=plt.cm.gray)
ax0.set_xlabel("Noise mask")
ax1.imshow(img, cmap=plt.cm.gray)
ax1.set_xlabel("Noisy image")
ax2.imshow(entr_img)
ax2.set_xlabel("Local entropy")
fig.tight_layout()
# Second example: texture detection.
image = img_as_ubyte(data.camera())
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(10, 4), sharex=True,
sharey=True,
subplot_kw={"adjustable": "box-forced"})
img0 = ax0.imshow(image, cmap=plt.cm.gray)
ax0.set_title("Image")
ax0.axis("off")
fig.colorbar(img0, ax=ax0)
img1 = ax1.imshow(entropy(image, disk(5)), cmap=plt.cm.jet)
ax1.set_title("Entropy")
ax1.axis("off")
fig.colorbar(img1, ax=ax1)
fig.tight_layout()
plt.show()
| bsd-3-clause |
dimonaks/siman | siman/bands.py | 1 | 5975 | #!/usr/bin/env python
# -*- coding=utf-8 -*-
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.gridspec import GridSpec
# import pymatgen.io
# print(dir(pymatgen.io))
from pymatgen.io.vasp import Vasprun
from pymatgen.electronic_structure.core import Spin, OrbitalType
from siman.small_functions import makedir
def rgbline(ax, k, e, red, green, blue, alpha=1.):
# creation of segments based on
# http://nbviewer.ipython.org/urls/raw.github.com/dpsanders/matplotlib-examples/master/colorline.ipynb
pts = np.array([k, e]).T.reshape(-1, 1, 2)
seg = np.concatenate([pts[:-1], pts[1:]], axis=1)
nseg = len(k) - 1
r = [0.5 * (red[i] + red[i + 1]) for i in range(nseg)]
g = [0.5 * (green[i] + green[i + 1]) for i in range(nseg)]
b = [0.5 * (blue[i] + blue[i + 1]) for i in range(nseg)]
    a = np.ones(nseg, float) * alpha  # builtin float; np.float is removed in recent NumPy
lc = LineCollection(seg, colors=list(zip(r, g, b, a)), linewidth=2)
ax.add_collection(lc)
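# Usage sketch for rgbline (added comment; the toy data below is illustrative):
#
#   fig, ax = plt.subplots()
#   k = np.arange(10)
#   e = np.sin(k / 3.0)
#   w = np.linspace(0.0, 1.0, 10)
#   rgbline(ax, k, e, red=w, green=1.0 - w, blue=np.zeros_like(w))
#   ax.set_xlim(0, 9); ax.set_ylim(-1.1, 1.1)
#
# Each segment is colored by averaging the per-point red/green/blue weights of
# its two endpoints, which is how the band characters are blended below.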
def read_kpoint_labels(filename):
"""
Read commented kpoint labels from VASP KPOINTS file
"""
labels = []
with open(filename, 'r') as f:
[f.readline() for i in range(4)]
# next(f)
lab = ''
for line in f:
# print (line)
if '!' in line:
lab_next = line.split('!')[1].strip()
# print (lab_next, 'q')
if lab_next and lab_next != lab:
# print (lab_next)
labels.append(lab_next)
lab = lab_next
return labels
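# Example of the KPOINTS lines this parser expects (added comment; the exact
# layout is an assumption based on VASP line-mode KPOINTS files):
#
#   0.000 0.000 0.000 ! GAMMA
#   0.500 0.000 0.500 ! X
#
# After skipping the 4-line header, read_kpoint_labels() would return
# ['GAMMA', 'X'], keeping only consecutive labels that differ.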
# if __name__ == "__main__":
def plot_bands(vasprun_dos, vasprun_bands, kpoints, element, ylim = (None, None)):
# read data
# Credit https://github.com/gVallverdu/bandstructureplots
# ---------
# kpoints labels
# labels = [r"$L$", r"$\Gamma$", r"$X$", r"$U,K$", r"$\Gamma$"]
labels = read_kpoint_labels(kpoints)
# density of states
# dosrun = Vasprun(vasprun_dos)
dosrun = Vasprun(vasprun_bands)
spd_dos = dosrun.complete_dos.get_spd_dos()
# bands
run = Vasprun(vasprun_bands, parse_projected_eigen=True)
bands = run.get_band_structure(kpoints,
line_mode=True,
efermi=dosrun.efermi)
# set up matplotlib plot
# ----------------------
# general options for plot
font = {'family': 'serif', 'size': 24}
plt.rc('font', **font)
    # set up 2 graphs with aspect ratio 2/1
# plot 1: bands diagram
# plot 2: Density of States
gs = GridSpec(1, 2, width_ratios=[2, 1])
fig = plt.figure(figsize=(11.69, 8.27))
# fig.suptitle("Bands diagram of copper")
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1]) # , sharey=ax1)
# set ylim for the plot
# ---------------------
if ylim[0]:
emin = ylim[0]
else:
emin = -10.
if ylim[1]:
emax = ylim[1]
else:
emax = 10.
ax1.set_ylim(emin, emax)
ax2.set_ylim(emin, emax)
# Band Diagram
# ------------
name = element
pbands = bands.get_projections_on_elements_and_orbitals({name: ["s", "p", "d"]})
# print(bands)
# compute s, p, d normalized contributions
contrib = np.zeros((bands.nb_bands, len(bands.kpoints), 3))
# print(pbands)
for b in range(bands.nb_bands):
for k in range(len(bands.kpoints)):
# print(Spin.up)
sc = pbands[Spin.up][b][k][name]["s"]**2
pc = pbands[Spin.up][b][k][name]["p"]**2
dc = pbands[Spin.up][b][k][name]["d"]**2
tot = sc + pc + dc
if tot != 0.0:
contrib[b, k, 0] = sc / tot
contrib[b, k, 1] = pc / tot
contrib[b, k, 2] = dc / tot
# plot bands using rgb mapping
for b in range(bands.nb_bands):
rgbline(ax1,
range(len(bands.kpoints)),
[e - bands.efermi for e in bands.bands[Spin.up][b]],
contrib[b, :, 0],
contrib[b, :, 1],
contrib[b, :, 2])
# style
ax1.set_xlabel("k-points")
ax1.set_ylabel(r"$E - E_f$ / eV")
ax1.grid()
# fermi level at 0
ax1.hlines(y=0, xmin=0, xmax=len(bands.kpoints), color="k", linestyle = '--', lw=1)
# labels
nlabs = len(labels)
step = len(bands.kpoints) / (nlabs - 1)
for i, lab in enumerate(labels):
ax1.vlines(i * step, emin, emax, "k")
ax1.set_xticks([i * step for i in range(nlabs)])
ax1.set_xticklabels(labels)
ax1.set_xlim(0, len(bands.kpoints))
# Density of states
# ----------------
ax2.set_yticklabels([])
ax2.grid()
ax2.set_xlim(1e-4, 5)
ax2.set_xticklabels([])
ax2.hlines(y=0, xmin=0, xmax=5, color="k", lw=2)
ax2.set_xlabel("Density of States", labelpad=28)
# spd contribution
ax2.plot(spd_dos[OrbitalType.s].densities[Spin.up],
dosrun.tdos.energies - dosrun.efermi,
"r-", label="3s", lw=2)
ax2.plot(spd_dos[OrbitalType.p].densities[Spin.up],
dosrun.tdos.energies - dosrun.efermi,
"g-", label="3p", lw=2)
ax2.plot(spd_dos[OrbitalType.d].densities[Spin.up],
dosrun.tdos.energies - dosrun.efermi,
"b-", label="3d", lw=2)
# total dos
ax2.fill_between(dosrun.tdos.densities[Spin.up],
0,
dosrun.tdos.energies - dosrun.efermi,
color=(0.7, 0.7, 0.7),
facecolor=(0.7, 0.7, 0.7))
ax2.plot(dosrun.tdos.densities[Spin.up],
dosrun.tdos.energies - dosrun.efermi,
color=(0.6, 0.6, 0.6),
label="total DOS")
# plot format style
# -----------------
ax2.legend(fancybox=True, shadow=True, prop={'size': 18})
plt.subplots_adjust(wspace=0)
# plt.show()
makedir("figs/bands.png")
plt.savefig("figs/bands.png")
| gpl-2.0 |
appapantula/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
    # Test that each leaf subcluster has a radius no greater than the threshold
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
deepesch/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 292 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from the http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
| bsd-3-clause |
ChanChiChoi/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 234 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator: string, optional
Separator string used when constructing new features for one-hot
coding.
sparse: boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort: boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
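# Small demonstration of the one-of-K coding described in the class docstring
# (added sketch; guarded so that importing this module is unaffected).
if __name__ == "__main__":
    v = DictVectorizer(sparse=False)
    D = [{"f": "ham", "weight": 2}, {"f": "spam", "weight": 5}]
    X = v.fit_transform(D)
    print(v.get_feature_names())  # ['f=ham', 'f=spam', 'weight']
    print(X)                      # [[ 1.  0.  2.]
                                  #  [ 0.  1.  5.]]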
| bsd-3-clause |
bavardage/statsmodels | statsmodels/graphics/tests/test_boxplots.py | 4 | 1261 | import numpy as np
from numpy.testing import dec
from statsmodels.graphics.boxplots import violinplot, beanplot
from statsmodels.datasets import anes96
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
@dec.skipif(not have_matplotlib)
def test_violinplot_beanplot():
"""Test violinplot and beanplot with the same dataset."""
data = anes96.load_pandas()
party_ID = np.arange(7)
labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
"Independent-Independent", "Independent-Republican",
"Weak Republican", "Strong Republican"]
age = [data.exog['age'][data.endog == id] for id in party_ID]
fig = plt.figure()
ax = fig.add_subplot(111)
violinplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
plt.close(fig)
fig = plt.figure()
ax = fig.add_subplot(111)
beanplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
plt.close(fig)
| bsd-3-clause |
shangwuhencc/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
SaganBolliger/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/legend.py | 69 | 30705 | """
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
Return value is a sequence of text, line instances that make
up the legend
"""
from __future__ import division
import warnings
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.cbook import is_string_like, iterable, silent_list, safezip
from matplotlib.font_manager import FontProperties
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, Rectangle, Shadow, FancyBboxPatch
from matplotlib.collections import LineCollection, RegularPolyCollection
from matplotlib.transforms import Bbox
from matplotlib.offsetbox import HPacker, VPacker, PackerBase, TextArea, DrawingArea
class Legend(Artist):
"""
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are::
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
    loc can be a tuple of the normalized coordinate values with
    respect to its parent.
Return value is a sequence of text, line instances that make
up the legend
"""
codes = {'best' : 0, # only implemented for axis legends
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
}
zorder = 5
def __str__(self):
return "Legend"
def __init__(self, parent, handles, labels,
loc = None,
numpoints = None, # the number of points in the legend line
markerscale = None, # the relative size of legend markers vs. original
scatterpoints = 3, # TODO: may be an rcParam
scatteryoffsets=None,
prop = None, # properties for the legend texts
# the following dimensions are in axes coords
pad = None, # deprecated; use borderpad
labelsep = None, # deprecated; use labelspacing
handlelen = None, # deprecated; use handlelength
handletextsep = None, # deprecated; use handletextpad
axespad = None, # deprecated; use borderaxespad
# spacing & pad defined as a fractionof the font-size
borderpad = None, # the whitespace inside the legend border
labelspacing=None, #the vertical space between the legend entries
handlelength=None, # the length of the legend handles
handletextpad=None, # the pad between the legend handle and text
borderaxespad=None, # the pad between the axes and legend border
columnspacing=None, # spacing between columns
ncol=1, # number of columns
mode=None, # mode for horizontal distribution of columns. None, "expand"
fancybox=None, # True use a fancy box, false use a rounded box, none use rc
shadow = None,
):
"""
- *parent* : the artist that contains the legend
- *handles* : a list of artists (lines, patches) to add to the legend
- *labels* : a list of strings to label the legend
Optional keyword arguments:
================ ==================================================================
Keyword Description
================ ==================================================================
loc a location code or a tuple of coordinates
numpoints the number of points in the legend line
prop the font property
markerscale the relative size of legend markers vs. original
fancybox if True, draw a frame with a round fancybox. If None, use rc
shadow if True, draw a shadow behind legend
scatteryoffsets a list of yoffsets for scatter symbols in legend
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
================ ==================================================================
The dimensions of pad and spacing are given as a fraction of the
fontsize. Values from rcParams will be used if None.
"""
from matplotlib.axes import Axes # local import only to avoid circularity
from matplotlib.figure import Figure # local import only to avoid circularity
Artist.__init__(self)
if prop is None:
self.prop=FontProperties(size=rcParams["legend.fontsize"])
else:
self.prop=prop
self.fontsize = self.prop.get_size_in_points()
propnames=['numpoints', 'markerscale', 'shadow', "columnspacing",
"scatterpoints"]
localdict = locals()
for name in propnames:
if localdict[name] is None:
value = rcParams["legend."+name]
else:
value = localdict[name]
setattr(self, name, value)
        # Take care of the deprecated keywords
deprecated_kwds = {"pad":"borderpad",
"labelsep":"labelspacing",
"handlelen":"handlelength",
"handletextsep":"handletextpad",
"axespad":"borderaxespad"}
        # convert values of deprecated keywords (given in axes coords)
        # to new values in a fraction of the font size
# conversion factor
bbox = parent.bbox
axessize_fontsize = min(bbox.width, bbox.height)/self.fontsize
for k, v in deprecated_kwds.items():
# use deprecated value if not None and if their newer
            # counterpart is None.
if localdict[k] is not None and localdict[v] is None:
warnings.warn("Use '%s' instead of '%s'." % (v, k),
DeprecationWarning)
setattr(self, v, localdict[k]*axessize_fontsize)
continue
# Otherwise, use new keywords
if localdict[v] is None:
setattr(self, v, rcParams["legend."+v])
else:
setattr(self, v, localdict[v])
del localdict
self._ncol = ncol
if self.numpoints <= 0:
raise ValueError("numpoints must be >= 0; it was %d"% numpoints)
# introduce y-offset for handles of the scatter plot
if scatteryoffsets is None:
self._scatteryoffsets = np.array([3./8., 4./8., 2.5/8.])
else:
self._scatteryoffsets = np.asarray(scatteryoffsets)
reps = int(self.numpoints / len(self._scatteryoffsets)) + 1
self._scatteryoffsets = np.tile(self._scatteryoffsets, reps)[:self.scatterpoints]
# _legend_box is an OffsetBox instance that contains all
# legend items and will be initialized from _init_legend_box()
# method.
self._legend_box = None
if isinstance(parent,Axes):
self.isaxes = True
self.set_figure(parent.figure)
elif isinstance(parent,Figure):
self.isaxes = False
self.set_figure(parent)
else:
raise TypeError("Legend needs either Axes or Figure as parent")
self.parent = parent
if loc is None:
loc = rcParams["legend.loc"]
if not self.isaxes and loc in [0,'best']:
loc = 'upper right'
if is_string_like(loc):
if loc not in self.codes:
if self.isaxes:
warnings.warn('Unrecognized location "%s". Falling back on "best"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 0
else:
warnings.warn('Unrecognized location "%s". Falling back on "upper right"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 1
else:
loc = self.codes[loc]
if not self.isaxes and loc == 0:
warnings.warn('Automatic legend placement (loc="best") not implemented for figure legend. '
'Falling back on "upper right".')
loc = 1
self._loc = loc
self._mode = mode
# We use FancyBboxPatch to draw a legend frame. The location
# and size of the box will be updated during the drawing time.
self.legendPatch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.fontsize,
snap=True
)
# The width and height of the legendPatch will be set (in the
# draw()) to the length that includes the padding. Thus we set
# pad=0 here.
if fancybox is None:
fancybox = rcParams["legend.fancybox"]
if fancybox == True:
self.legendPatch.set_boxstyle("round",pad=0,
rounding_size=0.2)
else:
self.legendPatch.set_boxstyle("square",pad=0)
self._set_artist_props(self.legendPatch)
self._drawFrame = True
# init with null renderer
self._init_legend_box(handles, labels)
self._last_fontsize_points = self.fontsize
def _set_artist_props(self, a):
"""
set the boilerplate props for artists added to axes
"""
a.set_figure(self.figure)
for c in self.get_children():
c.set_figure(self.figure)
a.set_transform(self.get_transform())
def _findoffset_best(self, width, height, xdescent, ydescent, renderer):
"Heper function to locate the legend at its best position"
ox, oy = self._find_best_position(width, height, renderer)
return ox+xdescent, oy+ydescent
def _findoffset_loc(self, width, height, xdescent, ydescent, renderer):
"Heper function to locate the legend using the location code"
if iterable(self._loc) and len(self._loc)==2:
# when loc is a tuple of axes(or figure) coordinates.
fx, fy = self._loc
bbox = self.parent.bbox
x, y = bbox.x0 + bbox.width * fx, bbox.y0 + bbox.height * fy
else:
bbox = Bbox.from_bounds(0, 0, width, height)
x, y = self._get_anchored_bbox(self._loc, bbox, self.parent.bbox, renderer)
return x+xdescent, y+ydescent
def draw(self, renderer):
"Draw everything that belongs to the legend"
if not self.get_visible(): return
self._update_legend_box(renderer)
renderer.open_group('legend')
# find_offset function will be provided to _legend_box and
# _legend_box will draw itself at the location of the return
# value of the find_offset.
if self._loc == 0:
_findoffset = self._findoffset_best
else:
_findoffset = self._findoffset_loc
def findoffset(width, height, xdescent, ydescent):
return _findoffset(width, height, xdescent, ydescent, renderer)
self._legend_box.set_offset(findoffset)
fontsize = renderer.points_to_pixels(self.fontsize)
        # if mode == expand, set the width of the legend_box to the
        # width of the parent (minus pads)
if self._mode in ["expand"]:
pad = 2*(self.borderaxespad+self.borderpad)*fontsize
self._legend_box.set_width(self.parent.bbox.width-pad)
if self._drawFrame:
# update the location and size of the legend
bbox = self._legend_box.get_window_extent(renderer)
self.legendPatch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
self.legendPatch.set_mutation_scale(fontsize)
if self.shadow:
shadow = Shadow(self.legendPatch, 2, -2)
shadow.draw(renderer)
self.legendPatch.draw(renderer)
self._legend_box.draw(renderer)
renderer.close_group('legend')
def _approx_text_height(self, renderer=None):
"""
Return the approximate height of the text. This is used to place
the legend handle.
"""
if renderer is None:
return self.fontsize
else:
return renderer.points_to_pixels(self.fontsize)
def _init_legend_box(self, handles, labels):
"""
        Initialize the legend_box. The legend_box is an instance of
the OffsetBox, which is packed with legend handles and
texts. Once packed, their location is calculated during the
drawing time.
"""
fontsize = self.fontsize
# legend_box is a HPacker, horizontally packed with
# columns. Each column is a VPacker, vertically packed with
# legend items. Each legend item is HPacker packed with
# legend handleBox and labelBox. handleBox is an instance of
# offsetbox.DrawingArea which contains legend handle. labelBox
# is an instance of offsetbox.TextArea which contains legend
# text.
text_list = [] # the list of text instances
handle_list = [] # the list of text instances
label_prop = dict(verticalalignment='baseline',
horizontalalignment='left',
fontproperties=self.prop,
)
labelboxes = []
for l in labels:
textbox = TextArea(l, textprops=label_prop,
multilinebaseline=True, minimumdescent=True)
text_list.append(textbox._text)
labelboxes.append(textbox)
handleboxes = []
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height() * 0.7
descent = 0.
# each handle needs to be drawn inside a box of (x, y, w, h) =
        # (0, -descent, width, height). And their coordinates should
# be given in the display coordinates.
# NOTE : the coordinates will be updated again in
# _update_legend_box() method.
# The transformation of each handle will be automatically set
        # to self.get_transform(). If the artist does not use its
        # default transform (eg, Collections), you need to
# manually set their transform to the self.get_transform().
for handle in handles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
                # we put some pad here to compensate for the size of the
# marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
legline.update_from(handle)
self._set_artist_props(legline) # after update
legline.set_clip_box(None)
legline.set_clip_path(None)
legline.set_drawstyle('default')
legline.set_marker('None')
handle_list.append(legline)
legline_marker = Line2D(xdata_marker, ydata[:len(xdata_marker)])
legline_marker.update_from(handle)
self._set_artist_props(legline_marker)
legline_marker.set_clip_box(None)
legline_marker.set_clip_path(None)
legline_marker.set_linestyle('None')
# we don't want to add this to the return list because
# the texts and handles are assumed to be in one-to-one
                # correspondence.
legline._legmarker = legline_marker
elif isinstance(handle, Patch):
p = Rectangle(xy=(0., 0.),
width = self.handlelength*fontsize,
height=(height-descent),
)
p.update_from(handle)
self._set_artist_props(p)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
elif isinstance(handle, LineCollection):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
self._set_artist_props(legline)
legline.set_clip_box(None)
legline.set_clip_path(None)
lw = handle.get_linewidth()[0]
dashes = handle.get_dashes()[0]
color = handle.get_colors()[0]
legline.set_color(color)
legline.set_linewidth(lw)
legline.set_dashes(dashes)
handle_list.append(legline)
elif isinstance(handle, RegularPolyCollection):
#ydata = self._scatteryoffsets
ydata = height*self._scatteryoffsets
size_max, size_min = max(handle.get_sizes()),\
min(handle.get_sizes())
# we may need to scale these sizes by "markerscale"
# attribute. But other handle types does not seem
# to care about this attribute and it is currently ignored.
if self.scatterpoints < 4:
sizes = [.5*(size_max+size_min), size_max,
size_min]
else:
sizes = (size_max-size_min)*np.linspace(0,1,self.scatterpoints)+size_min
p = type(handle)(handle.get_numsides(),
rotation=handle.get_rotation(),
sizes=sizes,
offsets=zip(xdata_marker,ydata),
transOffset=self.get_transform(),
)
p.update_from(handle)
p.set_figure(self.figure)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
else:
handle_list.append(None)
handlebox = DrawingArea(width=self.handlelength*fontsize,
height=height,
xdescent=0., ydescent=descent)
handle = handle_list[-1]
handlebox.add_artist(handle)
if hasattr(handle, "_legmarker"):
handlebox.add_artist(handle._legmarker)
handleboxes.append(handlebox)
        # We calculate the number of rows in each column. The first
        # (num_largecol) columns will have (nrows+1) rows, and the remaining
# (num_smallcol) columns will have (nrows) rows.
nrows, num_largecol = divmod(len(handleboxes), self._ncol)
num_smallcol = self._ncol-num_largecol
# starting index of each column and number of rows in it.
largecol = safezip(range(0, num_largecol*(nrows+1), (nrows+1)),
[nrows+1] * num_largecol)
smallcol = safezip(range(num_largecol*(nrows+1), len(handleboxes), nrows),
[nrows] * num_smallcol)
handle_label = safezip(handleboxes, labelboxes)
columnbox = []
for i0, di in largecol+smallcol:
# pack handleBox and labelBox into itemBox
itemBoxes = [HPacker(pad=0,
sep=self.handletextpad*fontsize,
children=[h, t], align="baseline")
for h, t in handle_label[i0:i0+di]]
# minimumdescent=False for the text of the last row of the column
itemBoxes[-1].get_children()[1].set_minimumdescent(False)
# pack columnBox
columnbox.append(VPacker(pad=0,
sep=self.labelspacing*fontsize,
align="baseline",
children=itemBoxes))
if self._mode == "expand":
mode = "expand"
else:
mode = "fixed"
sep = self.columnspacing*fontsize
self._legend_box = HPacker(pad=self.borderpad*fontsize,
sep=sep, align="baseline",
mode=mode,
children=columnbox)
self._legend_box.set_figure(self.figure)
self.texts = text_list
self.legendHandles = handle_list
def _update_legend_box(self, renderer):
"""
Update the dimension of the legend_box. This is required
        because the paddings, the handle size etc. depend on the dpi
of the renderer.
"""
# fontsize in points.
fontsize = renderer.points_to_pixels(self.fontsize)
if self._last_fontsize_points == fontsize:
# no update is needed
return
# each handle needs to be drawn inside a box of
# (x, y, w, h) = (0, -descent, width, height).
        # And their coordinates should be given in the display coordinates.
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height(renderer) * 0.7
descent = 0.
for handle in self.legendHandles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
                # we put some pad here to compensate for the size of the
# marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
legline = handle
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline.set_data(xdata, ydata)
legline_marker = legline._legmarker
legline_marker.set_data(xdata_marker, ydata[:len(xdata_marker)])
elif isinstance(handle, Patch):
p = handle
p.set_bounds(0., 0.,
self.handlelength*fontsize,
(height-descent),
)
elif isinstance(handle, RegularPolyCollection):
p = handle
ydata = height*self._scatteryoffsets
p.set_offsets(zip(xdata_marker,ydata))
# correction factor
cor = fontsize / self._last_fontsize_points
# helper function to iterate over all children
def all_children(parent):
yield parent
for c in parent.get_children():
for cc in all_children(c): yield cc
#now update paddings
for box in all_children(self._legend_box):
if isinstance(box, PackerBase):
box.pad = box.pad * cor
box.sep = box.sep * cor
elif isinstance(box, DrawingArea):
box.width = self.handlelength*fontsize
box.height = height
box.xdescent = 0.
box.ydescent=descent
self._last_fontsize_points = fontsize
def _auto_legend_data(self):
"""
Returns list of vertices and extents covered by the plot.
Returns a two long list.
First element is a list of (x, y) vertices (in
display-coordinates) covered by all the lines and line
collections, in the legend's handles.
Second element is a list of bounding boxes for all the patches in
the legend's handles.
"""
assert self.isaxes # should always hold because function is only called internally
ax = self.parent
vertices = []
bboxes = []
lines = []
for handle in ax.lines:
assert isinstance(handle, Line2D)
path = handle.get_path()
trans = handle.get_transform()
tpath = trans.transform_path(path)
lines.append(tpath)
for handle in ax.patches:
assert isinstance(handle, Patch)
if isinstance(handle, Rectangle):
transform = handle.get_data_transform()
bboxes.append(handle.get_bbox().transformed(transform))
else:
transform = handle.get_transform()
bboxes.append(handle.get_path().get_extents(transform))
return [vertices, bboxes, lines]
def draw_frame(self, b):
'b is a boolean. Set draw frame to b'
self._drawFrame = b
def get_children(self):
'return a list of child artists'
children = []
if self._legend_box:
children.append(self._legend_box)
return children
def get_frame(self):
'return the Rectangle instance used to frame the legend'
return self.legendPatch
def get_lines(self):
'return a list of lines.Line2D instances in the legend'
return [h for h in self.legendHandles if isinstance(h, Line2D)]
def get_patches(self):
'return a list of patch instances in the legend'
return silent_list('Patch', [h for h in self.legendHandles if isinstance(h, Patch)])
def get_texts(self):
'return a list of text.Text instance in the legend'
return silent_list('Text', self.texts)
def get_window_extent(self):
        'return the extent of the legend'
return self.legendPatch.get_window_extent()
def _get_anchored_bbox(self, loc, bbox, parentbbox, renderer):
"""
Place the *bbox* inside the *parentbbox* according to a given
location code. Return the (x,y) coordinate of the bbox.
- loc: a location code in range(1, 11).
This corresponds to the possible values for self._loc, excluding "best".
        - bbox: bbox to be placed, display coordinate units.
- parentbbox: a parent box which will contain the bbox. In
display coordinates.
"""
assert loc in range(1,11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = range(11)
anchor_coefs={UR:"NE",
UL:"NW",
LL:"SW",
LR:"SE",
R:"E",
CL:"W",
CR:"E",
LC:"S",
UC:"N",
C:"C"}
c = anchor_coefs[loc]
fontsize = renderer.points_to_pixels(self.fontsize)
container = parentbbox.padded(-(self.borderaxespad) * fontsize)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
def _find_best_position(self, width, height, renderer, consider=None):
"""
Determine the best location to place the legend.
`consider` is a list of (x, y) pairs to consider as a potential
lower-left corner of the legend. All are display coords.
"""
assert self.isaxes # should always hold because function is only called internally
verts, bboxes, lines = self._auto_legend_data()
bbox = Bbox.from_bounds(0, 0, width, height)
consider = [self._get_anchored_bbox(x, bbox, self.parent.bbox, renderer) for x in range(1, len(self.codes))]
#tx, ty = self.legendPatch.get_x(), self.legendPatch.get_y()
candidates = []
for l, b in consider:
legendBox = Bbox.from_bounds(l, b, width, height)
badness = 0
badness = legendBox.count_contains(verts)
badness += legendBox.count_overlaps(bboxes)
for line in lines:
if line.intersects_bbox(legendBox):
badness += 1
ox, oy = l, b
if badness == 0:
return ox, oy
candidates.append((badness, (l, b)))
# rather than use min() or list.sort(), do this so that we are assured
# that in the case of two equal badnesses, the one first considered is
# returned.
        # NOTE: list.sort() is stable. But leave as it is for now. -JJL
minCandidate = candidates[0]
for candidate in candidates:
if candidate[0] < minCandidate[0]:
minCandidate = candidate
ox, oy = minCandidate[1]
return ox, oy
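# Usage sketch (added; it exercises only the public keyword names documented in
# the class docstring above, through the standard Axes.legend entry point).
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1], label="rising")
    ax.plot([0, 1], [1, 0], label="falling")
    # 'upper left' is location code 2; borderpad and labelspacing are given
    # as fractions of the font size, as described above.
    ax.legend(loc="upper left", borderpad=0.4, labelspacing=0.6, fancybox=True)
    plt.show()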
| agpl-3.0 |
murali-munna/scikit-learn | sklearn/neighbors/base.py | 115 | 29783 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
            # A tree approach is better for a small number of neighbors,
# and KDTree is generally faster when available
if (self.n_neighbors is None
or self.n_neighbors < self._fit_X.shape[0] // 2):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
        Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
            # Corner case: when the number of duplicates is more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
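    # A small worked example of the self-exclusion step above, assuming the
    # training set is queried against itself with n_neighbors=1 (bumped to 2
    # internally): with neigh_ind like [[0, 1], [1, 0], [2, 0]], sample_mask
    # keeps only the entries that differ from each query's own row index,
    # leaving one true neighbor per row after the reshape: [[1], [0], [0]].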
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([1., 1., 1.])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
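# A small sketch of the CSR assembly in radius_neighbors_graph above, assuming
# three query points with neighborhoods [0, 2], [1] and [0, 1, 2]:
# >>> import numpy as np
# >>> A_ind = np.array([0, 2, 1, 0, 1, 2])   # concatenated neighbor indices
# >>> n_neighbors = np.array([2, 1, 3])
# >>> np.concatenate((np.zeros(1, dtype=int), np.cumsum(n_neighbors)))
# array([0, 2, 3, 6])
# Row i of the resulting sparse matrix then holds A_ind[indptr[i]:indptr[i+1]].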
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
| bsd-3-clause |
jhaux/tensorflow | tensorflow/examples/learn/text_classification_character_cnn.py | 30 | 4292 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is an example of using convolutional networks over characters for
the DBpedia dataset to predict a class from the description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def char_cnn_model(features, target):
"""Character level convolutional neural network model to predict classes."""
target = tf.one_hot(target, 15, 1, 0)
byte_list = tf.reshape(
tf.one_hot(features, 256), [-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(
byte_list, N_FILTERS, FILTER_SHAPE1, padding='VALID')
    # Add a ReLU for non-linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(
conv1,
ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1],
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(
pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
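# Rough tensor shapes through char_cnn_model above, assuming a batch size B:
# one-hot bytes [B, 100, 256, 1] -> conv1 (VALID, 20x256) [B, 81, 1, 10]
# -> max-pool (window 4, stride 2, SAME) [B, 41, 1, 10] -> transpose
# [B, 41, 10, 1] -> conv2 (VALID, 20x10) [B, 22, 1, 10]
# -> reduce_max + squeeze [B, 10] -> fully connected logits [B, 15].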
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
roryk/exomeCov | ecov/fastqc.py | 1 | 1506 | import os.path as op
import pandas as pd
from fadapa import Fadapa
from bcbio.distributed.transaction import file_transaction
from bcbio.utils import rbind, file_exists, safe_makedir
def _get_module(fastq_list, module, wide=True):
dt_together = []
for sample in fastq_list:
dt = []
itern = fastq_list[sample].clean_data(module)
header = itern[0]
for data in itern[1:]:
if data[0].startswith("#"):
header = data
continue
if wide:
if data[0].find("-") > -1:
f, s = map(int, data[0].split("-"))
for pos in range(f, s):
dt.append([str(pos)] + data[1:])
else:
dt.append(data)
dt = pd.DataFrame(dt)
dt.columns = [h.replace(" ", "_") for h in header]
dt['sample'] = sample
dt_together.append(dt)
dt_together = rbind(dt_together)
return dt_together
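# A minimal usage sketch for _get_module, assuming FastQC reports already
# parsed with Fadapa (the sample name and path below are illustrative):
# >>> fastq_list = {"sample1": Fadapa("sample1_fastqc/fastqc_data.txt")}
# >>> dt = _get_module(fastq_list, "Per base sequence quality")
# The result is a long-format pandas DataFrame with one row per position and
# a 'sample' column identifying the originating report.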
def merge_fastq(data, args):
"""
merge all fastqc samples into one by module
"""
out_dir = safe_makedir(args.out)
fastqc_list = {}
for s in data:
name = s[0]['name']
fn = s[0]['fastqc']
fastqc_list[name] = Fadapa(fn)
module = [m[1] for m in fastqc_list[name].summary()][2:9]
for m in module:
out_file = op.join(out_dir, m.replace(" ", "_") + ".tsv")
dt = _get_module(fastqc_list, m)
dt.to_csv(out_file, sep="\t", index=False)
| mit |
RPGOne/scikit-learn | setup.py | 9 | 12000 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
import traceback
import subprocess
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
SCIPY_MIN_VERSION = '0.9'
NUMPY_MIN_VERSION = '1.6.1'
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
extras_require={
'alldeps': (
'numpy >= {0}'.format(NUMPY_MIN_VERSION),
'scipy >= {0}'.format(SCIPY_MIN_VERSION),
),
},
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
# Remove c files if we are not within a sdist package
cwd = os.path.abspath(os.path.dirname(__file__))
remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
if remove_c_files:
cython_hash_file = os.path.join(cwd, 'cythonize.dat')
if os.path.exists(cython_hash_file):
os.unlink(cython_hash_file)
print('Will remove generated .c files')
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if any(filename.endswith(suffix) for suffix in
(".so", ".pyd", ".dll", ".pyc")):
os.unlink(os.path.join(dirpath, filename))
continue
extension = os.path.splitext(filename)[1]
if remove_c_files and extension in ['.c', '.cpp']:
pyx_file = str.replace(filename, extension, '.pyx')
if os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(SCIPY_MIN_VERSION)
scipy_status['version'] = scipy_version
except ImportError:
traceback.print_exc()
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(NUMPY_MIN_VERSION)
numpy_status['version'] = numpy_version
except ImportError:
traceback.print_exc()
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
p = subprocess.call([sys.executable, os.path.join(cwd,
'build_tools',
'cythonize.py'),
'sklearn'],
cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if len(sys.argv) == 1 or (
len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info',
'--version',
'clean'))):
# For these actions, NumPy is not required, nor Cythonization
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
NUMPY_MIN_VERSION)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
SCIPY_MIN_VERSION)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
if len(sys.argv) >= 2 and sys.argv[1] not in 'config':
# Cythonize if needed
print('Generating cython files')
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
# Generate Cython sources, unless building from source release
generate_cython()
# Clean left-over .so file
for dirpath, dirnames, filenames in os.walk(
os.path.join(cwd, 'sklearn')):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if extension in (".so", ".pyd", ".dll"):
pyx_file = str.replace(filename, extension, '.pyx')
print(pyx_file)
if not os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
clemkoa/scikit-learn | sklearn/metrics/tests/test_regression.py | 49 | 8058 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises, assert_raises_regex
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_squared_log_error(y_true, y_pred),
mean_squared_error(np.log(1 + y_true),
np.log(1 + y_pred)))
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = mean_squared_log_error(y_true, y_pred)
assert_almost_equal(error, 0.200, decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_squared_log_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
"used when targets contain negative values.",
mean_squared_log_error, [-1.], [-1.])
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test__check_reg_targets_exception():
invalid_multioutput = 'this_value_is_not_valid'
expected_message = ("Allowed 'multioutput' string values are.+"
"You provided multioutput={!r}".format(
invalid_multioutput))
assert_raises_regex(ValueError, expected_message,
_check_reg_targets,
[1, 2, 3],
[[1], [2], [3]],
invalid_multioutput)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
# Handling msle separately as it does not accept negative inputs.
y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
msle = mean_squared_log_error(y_true, y_pred, multioutput='raw_values')
msle2 = mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred),
multioutput='raw_values')
assert_array_almost_equal(msle, msle2, decimal=2)
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
# Handling msle separately as it does not accept negative inputs.
y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
msle = mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
msle2 = mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred),
multioutput=[0.3, 0.7])
assert_almost_equal(msle, msle2, decimal=2)
| bsd-3-clause |
qianfengzh/ML-source-code | algorithms/LR/logRegres.py | 1 | 3675 | #-*-coding=utf-8-*-
#-----------------------
# Named: Logistic Regression
# Created: 2016-07-12
# @Author: Qianfeng
#-----------------------
import numpy as np
import random
import matplotlib.pyplot as plt
def loadDataSet():
dataMat = []
labelMat = []
with open('testSet.txt') as fr:
for line in fr.readlines():
lineArr = line.strip().split()
dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
labelMat.append(int(lineArr[2]))
return dataMat, labelMat
def sigmoid(inX):
return 1.0/(1+np.exp(-inX))
def gradAscent(dataMatIn, classLabels):
dataMatrix = np.mat(dataMatIn)
labelMat = np.mat(classLabels).transpose()
m, n = np.shape(dataMatrix)
alpha = 0.001 # learning rate
maxCycle = 500 # stop condition
weigths = np.ones((n,1))
for k in range(maxCycle):
h = sigmoid(dataMatrix * weigths)
        # gradient ascent update
error = (labelMat - h)
weigths = weigths + alpha * dataMatrix.transpose() * error
return weigths
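# A minimal usage sketch, assuming testSet.txt sits next to this script:
# >>> dataArr, labelMat = loadDataSet()
# >>> weigths = gradAscent(dataArr, labelMat)
# >>> plotBestFit(weigths.getA())  # getA() converts the np.matrix to an ndarray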
#----------------------------------------------------------------
def plotBestFit(weigths):
dataMat, labelMat = loadDataSet()
dataArr = np.array(dataMat)
n = dataArr.shape[0]
xcord1 = []; ycord1 = []
xcord2 = []; ycord2 = []
for i in range(n):
if int(labelMat[i]) == 1:
xcord1.append(dataArr[i,1])
ycord1.append(dataArr[i,2])
else:
xcord2.append(dataArr[i,1])
ycord2.append(dataArr[i,2])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
ax.scatter(xcord2, ycord2, s=30, c='green')
x = np.arange(-3.0, 3.0, 0.1)
y = (-weigths[0]-weigths[1]*x)/weigths[2]
ax.plot(x, y)
plt.xlabel('X1')
plt.ylabel('X2')
plt.show()
#----------------------------------------------
# stochastic gradient ascent (can also be used for online learning)
def stocGradAscent0(dataMatrix, classLabels):
    m, n = dataMatrix.shape
    alpha = 0.01
    weigths = np.ones(n)  # 1-D weight vector, matching the shape of dataMatrix[i]
    for i in range(m):  # iterate over the m samples one at a time
        h = sigmoid(sum(dataMatrix[i] * weigths))
        error = classLabels[i] - h
        weigths = weigths + alpha * error * dataMatrix[i]
    return weigths
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    m, n = dataMatrix.shape
    weigths = np.ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))  # indices not yet used in this pass
        for i in range(m):
            alpha = 4/(1.0+j+i)+0.01  # alpha decays quickly at first, then slowly
            # pick a random sample from the remaining indices
            randIndex = int(random.uniform(0, len(dataIndex)))
            sampleIndex = dataIndex[randIndex]
            h = sigmoid(sum(dataMatrix[sampleIndex] * weigths))
            error = classLabels[sampleIndex] - h
            weigths = weigths + alpha * error * dataMatrix[sampleIndex]
            del dataIndex[randIndex]  # sample without replacement
    return weigths
#---------------------------------------------------
# practical application: predicting horse colic mortality
def classifyVector(inX, weigths):
prob = sigmoid(sum(inX * weigths))
if prob > 0.5:
return 1.0
else: return 0.0
def colicTest():
    frTrain = open('horseColicTraining.txt')
    frTest = open('horseColicTest.txt')
    trainingSet = []
    trainingLabels = []
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(currLine[21]))  # column 21 holds the class label
    trainWeights = stocGradAscent1(np.array(trainingSet), trainingLabels, 500)
    errorCount = 0.0
    numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        if int(classifyVector(np.array(lineArr), trainWeights)) != int(currLine[21]):
            errorCount += 1.0
    errorRate = float(errorCount) / numTestVec
    return errorRate
def multiTest():
    numTests = 10
    errorSum = 0.0
    for k in range(numTests):
        errorSum += colicTest()
    return errorSum / float(numTests)
| gpl-2.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/utils/tests/test_murmurhash.py | 79 | 2849 | # Author: Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
assert_true(h not in previous_hashes,
"Found collision on growing empty string")
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float64)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
| mit |
Fireblend/scikit-learn | sklearn/externals/joblib/parallel.py | 86 | 35087 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
- when used in conjunction `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
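# A minimal doctest-style sketch of what ``delayed`` captures:
# >>> from math import sqrt
# >>> func, args, kwargs = delayed(sqrt)(4)
# >>> (func is sqrt, args, kwargs)
# (True, (4,), {})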
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
    It is used for progress reporting, to update the estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
          output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
    max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example: note how the line of the error is indicated
as well as the values of the parameters passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto', temp_folder=None,
max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = None
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
# This lock is used to coordinate the main thread of this process with
# the async callback thread of the pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
# Prevent posix fork inside non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
# processing of a batch of tasks is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
# Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
# while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
# This is heuristic code to print a message only 'verbose' times
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
# we empty it and Python lists are not thread-safe by default, hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
with self._lock:
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
# The iterable was consumed all at once by the above while loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| bsd-3-clause |
0asa/scikit-learn | sklearn/ensemble/partial_dependence.py | 36 | 14909 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
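Examples
--------
A small sketch of the expected shapes (input values are illustrative)::
>>> X = np.asarray([[0., 100.], [1., 200.], [2., 300.]])
>>> grid, axes = _grid_from_X(X, grid_resolution=5) #doctest: +SKIP
>>> grid.shape #doctest: +SKIP
(9, 2)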
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentiles used to create the extreme values
for the ``grid``. Only used if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentiles used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
bsipocz/bokeh | bokeh/session.py | 42 | 20253 | ''' The session module provides the Session class, which encapsulates a
connection to a Document that resides on a Bokeh server.
The Session class provides methods for creating, loading and storing
documents and objects, as well as methods for user-authentication. These
are useful when the server is run in multi-user mode.
'''
from __future__ import absolute_import, print_function
#--------
# logging
#--------
import logging
logger = logging.getLogger(__name__)
#-------------
# standard lib
#-------------
import time
import json
from os import makedirs
from os.path import expanduser, exists, join
import tempfile
#------------
# third party
#------------
from six.moves.urllib.parse import urlencode
from requests.exceptions import ConnectionError
#---------
# optional
#---------
try:
import pandas as pd
import tables
has_pandas = True
except ImportError as e:
has_pandas = False
#--------
# project
#--------
from . import browserlib
from . import protocol
from .embed import autoload_server
from .exceptions import DataIntegrityException
from .util.notebook import publish_display_data
from .util.serialization import dump, get_json, urljoin
DEFAULT_SERVER_URL = "http://localhost:5006/"
class Session(object):
""" Encapsulate a connection to a document stored on a Bokeh Server.
Args:
name (str, optional) : name of server
root_url (str, optional) : root url of server
userapikey (str, optional) : (default: "nokey")
username (str, optional) : (default: "defaultuser")
load_from_config (bool, optional) :
Whether to load login information from config. (default: True)
If False, then we may overwrite the user's config.
configdir (str) : location of user configuration information
Attributes:
base_url (str) :
configdir (str) :
configfile (str) :
http_session (requests.session) :
userapikey (str) :
userinfo (dict) :
username (str) :
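Example (a sketch, assuming a Bokeh server is reachable at ``root_url``)::
from bokeh.session import Session
session = Session(root_url="http://localhost:5006/", load_from_config=False)
session.use_doc("my-document")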
"""
def __init__(
self,
name = DEFAULT_SERVER_URL,
root_url = DEFAULT_SERVER_URL,
userapikey = "nokey",
username = "defaultuser",
load_from_config = True,
configdir = None,
):
self.name = name
if not root_url.endswith("/"):
logger.warning("root_url should end with a /, adding one")
root_url = root_url + "/"
self.root_url = root_url
# single user mode case
self.userapikey = userapikey
self.username = username
self._configdir = None
if configdir:
self.configdir = configdir
if load_from_config:
self.load()
@property
def http_session(self):
if hasattr(self, "_http_session"):
return self._http_session
else:
import requests
self._http_session = requests.session()
return self._http_session
@property
def username(self):
return self.http_session.headers.get('BOKEHUSER')
@username.setter
def username(self, val):
self.http_session.headers.update({'BOKEHUSER': val})
@property
def userapikey(self):
return self.http_session.headers.get('BOKEHUSER-API-KEY')
@userapikey.setter
def userapikey(self, val):
self.http_session.headers.update({'BOKEHUSER-API-KEY': val})
@property
def configdir(self):
""" filename where our config are stored. """
if self._configdir:
return self._configdir
bokehdir = join(expanduser("~"), ".bokeh")
if not exists(bokehdir):
makedirs(bokehdir)
return bokehdir
# for testing
@configdir.setter
def configdir(self, path):
self._configdir = path
@property
def configfile(self):
return join(self.configdir, "config.json")
def load_dict(self):
configfile = self.configfile
if not exists(configfile):
data = {}
else:
with open(configfile, "r") as f:
data = json.load(f)
return data
def load(self):
""" Loads the server configuration information from disk
Returns:
None
"""
config_info = self.load_dict().get(self.name, {})
print("Using saved session configuration for %s" % self.name)
print("To override, pass 'load_from_config=False' to Session")
self.root_url = config_info.get('root_url', self.root_url)
self.userapikey = config_info.get('userapikey', self.userapikey)
self.username = config_info.get('username', self.username)
def save(self):
""" Save the server configuration information to JSON
Returns:
None
"""
data = self.load_dict()
data[self.name] = {'root_url': self.root_url,
'userapikey': self.userapikey,
'username': self.username}
configfile = self.configfile
with open(configfile, "w+") as f:
json.dump(data, f)
def register(self, username, password):
''' Register a new user with a bokeh server.
.. note::
This is useful in multi-user mode.
Args:
username (str) : user name to register
password (str) : user password for account
Returns:
None
'''
url = urljoin(self.root_url, "bokeh/register")
result = self.execute('post', url, data={
'username': username,
'password': password,
'api': 'true'
})
if result.status_code != 200:
raise RuntimeError("Unknown Error")
result = get_json(result)
if result['status']:
self.username = username
self.userapikey = result['userapikey']
self.save()
else:
raise RuntimeError(result['error'])
def login(self, username, password):
''' Log a user into a bokeh server.
.. note::
This is useful in multi-user mode.
Args:
username (str) : user name to log in
password (str) : user password
Returns:
None
'''
url = urljoin(self.root_url, "bokeh/login")
result = self.execute('post', url, data={
'username': username,
'password': password,
'api': 'true'
})
if result.status_code != 200:
raise RuntimeError("Unknown Error")
result = get_json(result)
if result['status']:
self.username = username
self.userapikey = result['userapikey']
self.save()
else:
raise RuntimeError(result['error'])
self.save()
def browser_login(self):
""" Open a browser with a token that logs the user into a bokeh server.
.. note::
This is useful in multi-user mode.
Return:
None
"""
controller = browserlib.get_browser_controller()
url = urljoin(self.root_url, "bokeh/loginfromapikey")
url += "?" + urlencode({'username': self.username,
'userapikey': self.userapikey})
controller.open(url)
def data_source(self, name, data):
""" Makes and uploads a server data source to the server.
.. note::
The server must be configured with a data directory.
Args:
name (str) : name for the data source object
data (pd.DataFrame or np.array) : data to upload
Returns:
a ServerDataSource
"""
raise NotImplementedError
def list_data(self):
""" Return all the data soruces on the server.
Returns:
sources : JSON
"""
raise NotImplementedError
def publish(self):
url = urljoin(self.root_url, "/bokeh/%s/publish" % self.docid)
self.post_json(url)
def execute(self, method, url, headers=None, **kwargs):
""" Execute an HTTP request using the current session.
Returns the response
Args:
method (string) : 'get' or 'post'
url (string) : url
headers (dict, optional) : any extra HTTP headers
Keyword Args:
Any extra arguments to pass into the requests library
Returns:
response
"""
import requests
import warnings
func = getattr(self.http_session, method)
try:
resp = func(url, headers=headers, **kwargs)
except requests.exceptions.ConnectionError as e:
warnings.warn("You need to start the bokeh-server to see this example.")
raise e
if resp.status_code == 409:
raise DataIntegrityException
if resp.status_code == 401:
raise Exception('HTTP Unauthorized accessing')
return resp
def execute_json(self, method, url, headers=None, **kwargs):
""" same as execute, except ensure that json content-type is
set in headers and interprets and returns the json response
"""
if headers is None:
headers = {}
headers['content-type'] = 'application/json'
resp = self.execute(method, url, headers=headers, **kwargs)
return get_json(resp)
def get_json(self, url, headers=None, **kwargs):
""" Return the result of an HTTP 'get'.
Args:
url (str) : the URL for the 'get' request
headers (dict, optional) : any extra HTTP headers
Keyword Args:
Any extra arguments to pass into the requests library
Returns:
response: JSON
"""
return self.execute_json('get', url, headers=headers, **kwargs)
def post_json(self, url, headers=None, **kwargs):
""" Return the result of an HTTP 'post'
Args:
url (str) : the URL for the 'post' request
headers (dict, optional) : any extra HTTP headers
Keyword Args:
Any extra arguments to pass into the requests library
Returns:
response: JSON
"""
return self.execute_json('post', url, headers=headers, **kwargs)
@property
def userinfo(self):
if not hasattr(self, "_userinfo"):
url = urljoin(self.root_url, 'bokeh/userinfo/')
self._userinfo = self.get_json(url)
return self._userinfo
@userinfo.setter
def userinfo(self, val):
self._userinfo = val
@property
def base_url(self):
return urljoin(self.root_url, "bokeh/bb/")
def get_api_key(self, docid):
""" Retrieve the document API key from the server.
Args:
docid (string) : docid of the document to retrieve API key for
Returns:
apikey : string
"""
url = urljoin(self.root_url,"bokeh/getdocapikey/%s" % docid)
apikey = self.get_json(url)
if 'apikey' in apikey:
apikey = apikey['apikey']
logger.info('got read write apikey')
else:
apikey = apikey['readonlyapikey']
logger.info('got read only apikey')
return apikey
def find_doc(self, name):
""" Return the docid of the document with a title matching ``name``.
.. note::
Creates a new document with the given title if one is not found.
Args:
name (string) : name for the document
Returns:
docid : str
"""
docs = self.userinfo.get('docs')
matching = [x for x in docs if x.get('title') == name]
if len(matching) == 0:
logger.info("No documents found, creating new document '%s'" % name)
self.make_doc(name)
return self.find_doc(name)
elif len(matching) > 1:
logger.warning("Multiple documents with name '%s'" % name)
return matching[0]['docid']
def use_doc(self, name=None, docid=None):
""" Configure the session to use a given document.
Args:
name (str, optional) : name of the document to use
docid (str, optional) : id of the document to use
.. note::
only one of ``name`` or ``docid`` may be supplied.
Creates a document with the given name if one is not present on
the server.
Returns:
None
"""
if docid is not None and name is not None:
raise ValueError("only one of 'name' or 'docid' can be supplied to use_doc(...)")
if docid:
self.docid = docid
else:
self.docid = self.find_doc(name)
self.apikey = self.get_api_key(self.docid)
def make_doc(self, title):
""" Makes a new document with the given title on the server
.. note:: user information is reloaded
Returns:
None
"""
url = urljoin(self.root_url,"bokeh/doc/")
data = protocol.serialize_json({'title' : title})
self.userinfo = self.post_json(url, data=data)
def pull(self, typename=None, objid=None):
""" Pull JSON objects from the server.
Returns a specific object if both ``typename`` and ``objid`` are
supplied. Otherwise, returns all objects for the currently configured
document.
This is a low-level function.
Args:
typename (str, optional) : name of the type of object to pull
objid (str, optional) : ID of the object to pull
.. note::
you must supply either ``typename`` AND ``objid`` or omit both.
Returns:
attrs : JSON
"""
if typename is None and objid is None:
url = urljoin(self.base_url, self.docid +"/")
attrs = self.get_json(url)
elif typename is None or objid is None:
raise ValueError("typename and objid must both be None, or neither.")
else:
url = urljoin(
self.base_url,
self.docid + "/" + typename + "/" + objid + "/"
)
attr = self.get_json(url)
attrs = [{
'type': typename,
'id': objid,
'attributes': attr
}]
return attrs
def push(self, *jsonobjs):
""" Push JSON objects to the server.
This is a low-level function.
Args:
*jsonobjs (JSON) : objects to push to the server
Returns:
None
"""
data = protocol.serialize_json(jsonobjs)
url = urljoin(self.base_url, self.docid + "/", "bulkupsert")
self.post_json(url, data=data)
def gc(self):
url = urljoin(self.base_url, self.docid + "/", "gc")
self.post_json(url)
# convenience functions to use a session and store/fetch from server
def load_document(self, doc):
""" Loads data for the session and merge with the given document.
Args:
doc (Document) : document to load data into
Returns:
None
"""
self.gc()
json_objs = self.pull()
doc.merge(json_objs)
doc.docid = self.docid
def load_object(self, obj, doc):
""" Update an object in a document with data pulled from the server.
Args:
obj (PlotObject) : object to be updated
doc (Document) : the object's document
Returns:
None
"""
assert obj._id in doc._models
attrs = self.pull(typename=obj.__view_model__, objid=obj._id)
doc.load(*attrs)
def store_document(self, doc, dirty_only=True):
""" Store a document on the server.
Returns the models that were actually pushed.
Args:
doc (Document) : the document to store
dirty_only (bool, optional) : whether to store only dirty objects. (default: True)
Returns:
models : list[PlotObject]
"""
doc._add_all()
models = doc._models.values()
if dirty_only:
models = [x for x in models if getattr(x, '_dirty', False)]
json_objs = doc.dump(*models)
self.push(*json_objs)
for model in models:
model._dirty = False
return models
def store_objects(self, *objs, **kwargs):
""" Store objects on the server
Returns the objects that were actually stored.
Args:
*objs (PlotObject) : objects to store
Keywords Args:
dirty_only (bool, optional) : whether to store only dirty objects. (default: True)
Returns:
models : set[PlotObject]
"""
models = set()
for obj in objs:
models.update(obj.references())
if kwargs.pop('dirty_only', True):
models = list(models)
json_objs = dump(models, self.docid)
self.push(*json_objs)
for model in models:
model._dirty = False
return models
def object_link(self, obj):
""" Return a URL to a server page that will render the given object.
Args:
obj (PlotObject) : object to render
Returns:
URL string
"""
link = "bokeh/doc/%s/%s" % (self.docid, obj._id)
return urljoin(self.root_url, link)
def show(self, obj):
""" Display an object as HTML in IPython using its display protocol.
Args:
obj (PlotObject) : object to display
Returns:
None
"""
data = {'text/html': autoload_server(obj, self)}
publish_display_data(data)
def poll_document(self, document, interval=0.5):
""" Periodically ask the server for updates to the `document`. """
try:
while True:
self.load_document(document)
time.sleep(interval)
except KeyboardInterrupt:
print()
except ConnectionError:
print("Connection to bokeh-server was terminated")
# helper methods
def _prep_data_source_df(self, name, dataframe):
name = tempfile.NamedTemporaryFile(prefix="bokeh_data",
suffix=".pandas").name
store = pd.HDFStore(name)
store.append("__data__", dataframe, format="table", data_columns=True)
store.close()
return name
def _prep_data_source_numpy(self, name, arr):
name = tempfile.NamedTemporaryFile(prefix="bokeh_data",
suffix=".table").name
store = tables.File(name, 'w')
store.createArray("/", "__data__", obj=arr)
store.close()
return name
class TestSession(Session):
"""Currently, register and login do not work, everything else should work
in theory, but we'll have to test this as we go along and convert tests
"""
def __init__(self, *args, **kwargs):
if 'load_from_config' not in kwargs:
kwargs['load_from_config'] = False
self.client = kwargs.pop('client')
self.headers = {}
super(TestSession, self).__init__(*args, **kwargs)
@property
def username(self):
return self.headers.get('BOKEHUSER')
@username.setter
def username(self, val):
self.headers.update({'BOKEHUSER': val})
@property
def userapikey(self):
return self.headers.get('BOKEHUSER-API-KEY')
@userapikey.setter
def userapikey(self, val):
self.headers.update({'BOKEHUSER-API-KEY': val})
def execute(self, method, url, headers=None, **kwargs):
if headers is None:
headers = {}
func = getattr(self.client, method)
resp = func(url, headers=headers, **kwargs)
if resp.status_code == 409:
raise DataIntegrityException
if resp.status_code == 401:
raise Exception('HTTP Unauthorized accessing')
return resp
| bsd-3-clause |
xubenben/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
weaver-viii/h2o-3 | h2o-docs/src/api/data-science-example-1/example-native-pandas-scikit.py | 22 | 2796 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
import sklearn
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import preprocessing
# <codecell>
air_raw = DataFrame.from_csv("allyears_tiny.csv", index_col = False)
print(air_raw.head())
air_raw['RandNum'] = Series(np.random.uniform(size = len(air_raw['Origin'])))
print(air_raw.head())
# <codecell>
air_mapped = DataFrame()
air_mapped['RandNum'] = air_raw['RandNum']
air_mapped['IsDepDelayed'] = air_raw['IsDepDelayed']
air_mapped['IsDepDelayedInt'] = air_mapped.apply(lambda row:
1 if row['IsDepDelayed'] == 'YES' else 0,
axis=1)
del air_mapped['IsDepDelayed']
print(air_mapped.shape)
lb_origin = sklearn.preprocessing.LabelBinarizer()
lb_origin.fit(air_raw['Origin'])
tmp_origin = lb_origin.transform(air_raw['Origin'])
tmp_origin_df = DataFrame(tmp_origin)
print(tmp_origin_df.shape)
lb_dest = sklearn.preprocessing.LabelBinarizer()
lb_dest.fit(air_raw['Dest'])
tmp_dest = lb_origin.transform(air_raw['Dest'])
tmp_dest_df = DataFrame(tmp_dest)
print(tmp_dest_df.shape)
lb_uniquecarrier = sklearn.preprocessing.LabelBinarizer()
lb_uniquecarrier.fit(air_raw['UniqueCarrier'])
tmp_uniquecarrier = lb_origin.transform(air_raw['UniqueCarrier'])
tmp_uniquecarrier_df = DataFrame(tmp_uniquecarrier)
print(tmp_uniquecarrier_df.shape)
air_mapped = pd.concat([
air_mapped,
tmp_origin_df,
tmp_dest_df,
air_raw['Distance'],
tmp_uniquecarrier_df,
air_raw['Month'],
air_raw['DayofMonth'],
air_raw['DayOfWeek'],
],
axis=1)
print(air_mapped.shape)
air_mapped
air = air_mapped
# <codecell>
air_train = air.ix[air['RandNum'] <= 0.8]
# air_valid = air.ix[(air['RandNum'] > 0.8) & (air['RandNum'] <= 0.9)]
air_test = air.ix[air['RandNum'] > 0.9]
print(air_train.shape)
print(air_test.shape)
# <codecell>
X_train = air_train.copy(deep=True)
del X_train['RandNum']
del X_train['IsDepDelayedInt']
print(list(X_train.columns.values))
print(X_train.shape)
y_train = air_train['IsDepDelayedInt']
print(y_train.shape)
# <codecell>
clf = GradientBoostingClassifier(n_estimators = 10, max_depth = 3, learning_rate = 0.01)
clf.fit(X_train, y_train)
# <codecell>
X_test = air_test.copy(deep=True)
del X_test['RandNum']
del X_test['IsDepDelayedInt']
print(list(X_test.columns.values))
print(X_test.shape)
print("")
print("--- PREDICTIONS ---")
print("")
pred = clf.predict(X_test)
print(pred)
| apache-2.0 |
mmottahedi/neuralnilm_prototype | scripts/disag_545d.py | 2 | 8627 | from __future__ import print_function, division
#import matplotlib
import logging
from sys import stdout
# matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
from neuralnilm import (Net, RealApplianceSource)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import (
StartEndMeanPlotter, plot_disaggregate_start_stop_end)
from neuralnilm.disaggregate import (
disaggregate_start_stop_end, rectangles_to_matrix,
rectangles_matrix_to_vector, save_rectangles)
from neuralnilm.rectangulariser import rectangularise
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity, softmax
from lasagne.objectives import squared_error, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer,
DimshuffleLayer, DropoutLayer, ConcatLayer, PadLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
NAME = 'e545'
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 25000
N_SEQ_PER_BATCH = 64
MAX_TARGET_POWER = 2500
full_exp_name = NAME + 'd'
path = os.path.join(PATH, full_exp_name)
print("Changing directory to", path)
os.chdir(path)
logger = logging.getLogger(full_exp_name)
if not logger.handlers:
fh = logging.FileHandler(full_exp_name + '.log')
formatter = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(logging.StreamHandler(stream=stdout))
logger.setLevel(logging.DEBUG)
logger.info("***********************************")
logger.info("Preparing " + full_exp_name + "...")
# Load input stats
input_stats = {
'mean': np.load("input_stats_mean.npy"),
'std': np.load("input_stats_std.npy")
}
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
['washer dryer', 'washing machine'],
'kettle',
'HTPC',
'dish washer'
],
max_appliance_powers=[300, 2400, 2600, 200, 2500],
on_power_thresholds=[5] * 5,
min_on_durations=[60, 1800, 30, 60, 1800],
min_off_durations=[12, 600, 1, 12, 1800],
# Just load a tiny bit of data. Won't be used.
window=("2013-04-12", "2013-04-27"),
seq_length=2048,
output_one_appliance=True,
train_buildings=[1],
validation_buildings=[1],
n_seq_per_batch=N_SEQ_PER_BATCH,
standardise_input=True,
independently_center_inputs=False,
skip_probability=0.75,
target_is_start_and_end_and_mean=True,
one_target_per_seq=False,
input_stats=input_stats
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=lambda x, t: squared_error(x, t).mean(),
updates_func=nesterov_momentum,
learning_rate=1e-3,
do_save_activations=True,
auto_reshape=False,
plotter=StartEndMeanPlotter(
n_seq_to_plot=32, max_target_power=MAX_TARGET_POWER)
)
def exp_a(name):
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
logger=logging.getLogger(name)
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': PadLayer,
'width': 4
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1), # back to (batch, time, features)
'label': 'dimshuffle3'
},
{
'type': DenseLayer,
'num_units': 512 * 16,
'nonlinearity': rectify,
'label': 'dense0'
},
{
'type': DenseLayer,
'num_units': 512 * 8,
'nonlinearity': rectify,
'label': 'dense1'
},
{
'type': DenseLayer,
'num_units': 512 * 4,
'nonlinearity': rectify,
'label': 'dense2'
},
{
'type': DenseLayer,
'num_units': 512,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': 3,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
net.load_params(300000)
return net
# Load neural net
net = exp_a(full_exp_name)
net.print_net()
net.compile()
# Generate mains data
# create new source, based on net's source,
# but with 5 outputs (so each seq includes entire appliance activation,
# and to make it easier to plot every appliance),
# and long seq length,
# then make one long mains by concatenating each seq
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
logger=logger,
seq_length=2048,
border=100,
output_one_appliance=False,
input_stats=input_stats,
target_is_start_and_end_and_mean=False,
window=("2014-12-10", None)
))
mains_source = RealApplianceSource(**source_dict_copy)
mains_source.start()
N_BATCHES = 1
logger.info("Preparing synthetic mains data for {} batches.".format(N_BATCHES))
mains = None
targets = None
TARGET_I = 4
for batch_i in range(N_BATCHES):
batch = mains_source.queue.get(timeout=30)
mains_batch, targets_batch = batch.data
if mains is None:
mains = mains_batch
targets = targets_batch[:, :, TARGET_I]
else:
mains = np.concatenate((mains, mains_batch))
targets = np.concatenate((targets, targets_batch[:, :, TARGET_I]))
mains_source.stop()
# Post-process data
seq_length = net.input_shape[1]
def pad(data):
return np.pad(data, (seq_length, seq_length), mode='constant',
constant_values=(data.min().astype(float), ))
mains = pad(mains.flatten())
targets = pad(targets.flatten())
logger.info("Done preparing synthetic mains data!")
# Unstandardise for plotting
targets *= MAX_TARGET_POWER
mains_unstandardised = (mains * input_stats['std']) + input_stats['mean']
mains_unstandardised *= mains_source.max_input_power
# disag
STRIDE = 16
logger.info("Starting disag...")
rectangles = disaggregate_start_stop_end(
mains, net, stride=STRIDE, max_target_power=MAX_TARGET_POWER)
rectangles_matrix = rectangles_to_matrix(rectangles[0], MAX_TARGET_POWER)
disag_vector = rectangles_matrix_to_vector(
rectangles_matrix, min_on_power=500, overlap_threshold=0.30)
# save data to disk
logger.info("Saving data to disk...")
np.save('mains', mains_unstandardised)
np.save('targets', targets)
np.save('disag_vector', disag_vector)
save_rectangles(rectangles)
# plot
logger.info("Plotting...")
fig, axes = plt.subplots(4, 1, sharex=True)
alpha = STRIDE / seq_length
plot_disaggregate_start_stop_end(rectangles, ax=axes[0], alpha=alpha)
axes[0].set_title('Network output')
axes[1].plot(disag_vector)
axes[1].set_title("Disaggregated vector")
axes[2].plot(targets)
axes[2].set_title("Target")
axes[3].plot(mains_unstandardised)
axes[3].set_title('Network input')
axes[3].set_xlim((0, len(mains)))
plt.show()
logger.info("DONE!")
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/disag_545d.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| mit |
enlighter/learnML | mini-projects/p0 - titanic survival exploration/Titanic_Survival_Exploration.py | 1 | 3378 | import numpy as np
import pandas as pd
from pprintpp import pprint
# RMS Titanic data visualization code
from titanic_visualizations import survival_stats
from IPython.display import display
# Load the dataset
in_file = 'titanic_data.csv'
full_data = pd.read_csv(in_file)
# Store the 'Survived' feature in a new variable and remove it from the dataset
outcomes = full_data['Survived']
data = full_data.drop('Survived', axis = 1)
def accuracy_score(truth, pred):
""" Returns accuracy score for input truth and predictions. """
# Ensure that the number of predictions matches number of outcomes
if len(truth) == len(pred):
# Calculate and return the accuracy as a percent
return "Predictions have an accuracy of {:.2f}%.".format((truth == pred).mean()*100)
else:
return "Number of predictions does not match number of outcomes!"
#survival_stats(data, outcomes, 'Age', ["Sex == 'male'", "Pclass <= 3", "SibSp == 0"])
#survival_stats(data, outcomes, 'Age', ["Sex == 'male'", "Pclass <= 3", "SibSp == 1"])
#survival_stats(data, outcomes, 'Age', ["Sex == 'male'", "Pclass < 2", "SibSp == 0"])
#survival_stats(data, outcomes, 'Age', ["Sex == 'male'", "Pclass < 2", "SibSp == 1"])
survival_stats(data, outcomes, 'Pclass', ["Sex == 'female'", "SibSp == 0"])
survival_stats(data, outcomes, 'Pclass', ["Sex == 'female'", "SibSp > 0"])
def predictions_3(data):
""" Model with multiple features. Makes a prediction with an accuracy of at least 80%. """
predictions = []
for _, passenger in data.iterrows():
        # print(passenger)
        # Hand-tuned decision rules based on the survival_stats plots above:
        # boys under 10 are predicted to survive; adult men only if they are
        # in first class and in a favourable age band (which depends on
        # whether they have siblings/a spouse aboard); women are predicted to
        # survive unless they are in third class travelling with family.
        if passenger['Sex'] == 'male':
            if passenger['Age'] < 10:
                predictions.append(1)
                print("%d" % passenger['PassengerId'])
            else:
                if passenger['Pclass'] == 1:
                    if passenger['SibSp'] == 0:
                        if passenger['Age'] >= 10 and passenger['Age'] <= 40:
                            predictions.append(1)
                            print("%d" % passenger['PassengerId'])
                        else:
                            predictions.append(0)
                            print("%d" % passenger['PassengerId'])
                    elif passenger['SibSp'] > 0:
                        if passenger['Age'] >= 20 and passenger['Age'] <= 50:
                            predictions.append(1)
                            print("%d" % passenger['PassengerId'])
                        else:
                            predictions.append(0)
                            print("%d" % passenger['PassengerId'])
                else:
                    predictions.append(0)
                    print("%d" % passenger['PassengerId'])
        elif passenger['Sex'] == 'female':
            if passenger['Pclass'] == 3:
                if passenger['SibSp'] > 0:
                    predictions.append(0)
                    print("%d" % passenger['PassengerId'])
                else:
                    predictions.append(1)
                    print("%d" % passenger['PassengerId'])
            else:
                predictions.append(1)
                print("%d" % passenger['PassengerId'])
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_3(data)
print(accuracy_score(outcomes, predictions))
| mit |
CELMA-project/CELMA | celma/pickleTweaks/blobs/blobStats.py | 1 | 2483 | #!/usr/bin/env python
"""
Tweaks the statistics obtained from the blob runner.
"""
import pickle
import matplotlib.pylab as plt
import matplotlib.patches as pa
from matplotlib.ticker import MaxNLocator
import numpy as np
import os, sys
# If we add to sys.path, then it must be an absolute path
commonDir = os.path.abspath("./../../../common")
# Sys path is a list of system paths
sys.path.append(commonDir)
from CELMAPy.plotHelpers import SizeMaker, PlotHelper, seqCMap2
# Set the label colors
colors = seqCMap2(np.linspace(0.25,0.75,3))
sigmas = (2,3,4)
sD = {key:{"waiting":None, "pulse":None, "color":c}\
for key, c in zip(sigmas,colors)}
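# sD maps each sigma threshold (2, 3, 4) to the histogram patches of its
# "waiting" and "pulse" axes (waiting times between blobs and pulse widths,
# judging by the axis names) plus the colour used when re-drawing them; the
# patches themselves are harvested from the pickled figures in the loop below.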
scan = "B0_0.08"
path = "../../CSDXMagFieldScanAr/visualizationPhysical/{}/blobs/".format(scan)
# Obtain the patches
for key in sD.keys():
fileName =\
os.path.join(path, str(key), "temporalStats-blobs.pickle")
with open(fileName, "rb") as f:
fig = pickle.load(f)
axes = fig.get_axes()
wAx = axes[0]
pAx = axes[1]
wTitle = wAx.get_title()
pTitle = pAx.get_title()
wPatches = wAx.patches
pPatches = pAx.patches
title = fig.texts[0].get_text()
xLabel = wAx.get_xlabel()
yLabelP = pAx.get_ylabel()
yLabelW = wAx.get_ylabel()
sD[key]["waiting"] = wPatches
sD[key]["pulse"] = pPatches
# Make a new figure
fig, (wAx, pAx) = plt.subplots(ncols=2, figsize = SizeMaker.array(2,1))
patchesForLegend = []
for key in sD.keys():
for nr, p in enumerate(sD[key]["waiting"]):
# Make a new patch
label = r"${}\sigma$".format(key) if nr == 0 else None
wAx.add_patch(pa.Rectangle(p.get_xy(), p.get_width(),
p.get_height(), color = sD[key]["color"], alpha = 0.5,\
label = label))
for p in sD[key]["pulse"]:
# Make a new patch
pAx.add_patch(pa.Rectangle(p.get_xy(), p.get_width(),
p.get_height(), color = sD[key]["color"], alpha =
0.5))
# Set the decorations
wAx.set_title(wTitle)
wAx.set_xlabel(xLabel)
wAx.set_ylabel(yLabelW)
pAx.set_title(pTitle)
pAx.set_xlabel(xLabel)
pAx.set_ylabel(yLabelP)
wAx.autoscale()
pAx.autoscale()
fig.suptitle(title, y=1.1)
PlotHelper.makePlotPretty(wAx, rotation = 45)
PlotHelper.makePlotPretty(pAx, rotation = 45, legend = None)
wAx.yaxis.set_major_locator(MaxNLocator(integer=True))
pAx.yaxis.set_major_locator(MaxNLocator(integer=True))
PlotHelper.savePlot(fig, "blobStats.pdf")
| lgpl-3.0 |
jorge2703/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 129 | 43401 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more-or-less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
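    # Per sample, the reference update below is (plain SGD, squared loss,
    # L2 penalty):
    #     gradient = x.w + b - y_i
    #     w <- w * (1 - eta * alpha) - eta * gradient * x
    #     b <- b - eta * gradient * decay
    # and the "averaged" estimates are simply the running means of w and b
    # over all samples seen so far, which is what the loop accumulates.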
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
        # partial_fit with class_weight='balanced' is not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
        # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
        # build a very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
        # fitting again with class_weight="balanced" should give equally good predictions
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # give small sample weights to the class-1 samples
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
        # ground-truth linear model that generates y from X and to which the
        # models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/io/test_sql.py | 6 | 94046 | """SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
from __future__ import print_function
from warnings import catch_warnings
import pytest
import sqlite3
import csv
import os
import warnings
import numpy as np
import pandas as pd
from datetime import datetime, date, time
from pandas.core.dtypes.common import (
is_object_dtype, is_datetime64_dtype,
is_datetime64tz_dtype)
from pandas import DataFrame, Series, Index, MultiIndex, isnull, concat
from pandas import date_range, to_datetime, to_timedelta, Timestamp
import pandas.compat as compat
from pandas.compat import range, lrange, string_types, PY36
from pandas.core.tools.datetimes import format as date_format
import pandas.io.sql as sql
from pandas.io.sql import read_sql_table, read_sql_query
import pandas.util.testing as tm
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': {
'query': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'fields': (
'TextCol', 'DateCol', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
'mysql': {
'query': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'fields': (
'TextCol', 'DateCol', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
'postgresql': {
'query': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
'fields': (
'TextCol', 'DateCol', 'DateColWithTz',
'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
},
'read_parameters': {
'sqlite': "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
'mysql': 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
'postgresql': 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s'
},
'read_named_parameters': {
'sqlite': """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
'mysql': """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
'postgresql': """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
"""
},
'create_view': {
'sqlite': """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
}
}
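# SQL_STRINGS is keyed first by statement name and then by flavor
# ('sqlite' / 'mysql' / 'postgresql'), so the flavor-specific test classes can
# pick the right dialect with e.g. SQL_STRINGS['create_iris'][self.flavor].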
class MixInBase(object):
def teardown_method(self, method):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute("DROP TABLE IF EXISTS %s" %
sql._get_valid_mysql_name(table_name))
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute('SHOW TABLES')
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute("DROP TABLE IF EXISTS %s" %
sql._get_valid_sqlite_name(table_name))
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute(
"SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest(object):
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
import io
iris_csv_file = os.path.join(tm.get_data_path(), 'iris.csv')
self.drop_table('iris')
self._get_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with io.open(iris_csv_file, mode='r', newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table('iris_view')
self._get_exec().execute(SQL_STRINGS['create_view'][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
data = [(
'2000-01-03 00:00:00', 0.980268513777, 3.68573087906,
-0.364216805298, -1.15973806169),
('2000-01-04 00:00:00', 1.04791624281, -
0.0412318367011, -0.16181208307, 0.212549316967),
('2000-01-05 00:00:00', 0.498580885705,
0.731167677815, -0.537677223318, 1.34627041952),
('2000-01-06 00:00:00', 1.12020151869, 1.56762092543,
0.00364077397681, 0.67525259227)]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(dict(A=[4, 1, 3, 6],
B=['asd', 'gsq', 'ylt', 'jkl'],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=['1990-11-22', '1991-10-26',
'1993-11-26', '1995-12-12']))
df['E'] = to_datetime(df['E'])
self.test_frame2 = df
def _load_test3_data(self):
columns = ['index', 'A', 'B']
data = [(
'2000-01-03 00:00:00', 2 ** 31 - 1, -1.987670),
('2000-01-04 00:00:00', -29, -0.0412318367011),
('2000-01-05 00:00:00', 20000, 0.731167677815),
('2000-01-06 00:00:00', -290867, 1.56762092543)]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table('types_test_data')
self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [
{
'TextCol': 'first',
'DateCol': '2000-01-03 00:00:00',
'DateColWithTz': '2000-01-01 00:00:00-08:00',
'IntDateCol': 535852800,
'FloatCol': 10.10,
'IntCol': 1,
'BoolCol': False,
'IntColWithNull': 1,
'BoolColWithNull': False,
},
{
'TextCol': 'first',
'DateCol': '2000-01-04 00:00:00',
'DateColWithTz': '2000-06-01 00:00:00-07:00',
'IntDateCol': 1356998400,
'FloatCol': 10.10,
'IntCol': 1,
'BoolCol': False,
'IntColWithNull': None,
'BoolColWithNull': None,
},
]
for d in data:
self._get_exec().execute(
ins['query'],
[d[field] for field in ins['fields']]
)
def _count_rows(self, table_name):
result = self._get_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS['read_parameters'][self.flavor]
params = ['Iris-setosa', 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS['read_named_parameters'][self.flavor]
params = {'name': 'Iris-setosa', 'length': 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame1')
assert self.pandasSQL.has_table('test_frame1')
# Nuke table
self.drop_table('test_frame1')
def _to_sql_empty(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], 'test_frame1')
def _to_sql_fail(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
assert self.pandasSQL.has_table('test_frame1')
pytest.raises(ValueError, self.pandasSQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.drop_table('test_frame1')
def _to_sql_replace(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replace')
assert self.pandasSQL.has_table('test_frame1')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
assert num_rows == num_entries
self.drop_table('test_frame1')
def _to_sql_append(self):
# Nuke table just in case
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='append')
assert self.pandasSQL.has_table('test_frame1')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
assert num_rows == num_entries
self.drop_table('test_frame1')
def _roundtrip(self):
self.drop_table('test_frame_roundtrip')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame_roundtrip')
result = self.pandasSQL.read_query(
'SELECT * FROM test_frame_roundtrip')
result.set_index('level_0', inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _to_sql_save_index(self):
df = DataFrame.from_records([(1, 2.1, 'line1'), (2, 1.5, 'line2')],
columns=['A', 'B', 'C'], index=['A'])
self.pandasSQL.to_sql(df, 'test_to_sql_saves_index')
ix_cols = self._get_index_columns('test_to_sql_saves_index')
assert ix_cols == [['A', ], ]
def _transaction_test(self):
self.pandasSQL.execute("CREATE TABLE test_trans (A INT, B TEXT)")
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
# Make sure when transaction is rolled back, no rows get inserted
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise Exception('error')
        except Exception:
# ignore raised exception
pass
res = self.pandasSQL.read_query('SELECT * FROM test_trans')
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query('SELECT * FROM test_trans')
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = 'sqlite'
mode = None
def setup_method(self, method):
self.conn = self.connect()
self._load_iris_data()
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query(
"SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn)
assert sql.has_table('test_frame1', self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, if_exists='fail')
assert sql.has_table('test_frame2', self.conn)
pytest.raises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, if_exists='fail')
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, if_exists='replace')
assert sql.has_table('test_frame3', self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame3')
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, if_exists='append')
assert sql.has_table('test_frame4', self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame4')
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, 'test_frame5', self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype='int64'), name='series')
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_to_sql_panel(self):
with catch_warnings(record=True):
panel = tm.makePanel()
pytest.raises(NotImplementedError, sql.to_sql, panel,
'test_panel', self.conn)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip',
con=self.conn)
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index('level_0', inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip', con=self.conn,
index=False, chunksize=2)
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_date_parsing(self):
        # Test date parsing in read_sql_query
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['DateCol'])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['IntDateCol'])
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'IntDateCol': 's'})
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
index_col='DateCol',
parse_dates=['DateCol', 'IntDateCol'])
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(
Series(['00:00:01', '00:00:03'], name='foo')).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql('test_timedelta', self.conn)
result = sql.read_sql_query('SELECT * FROM test_timedelta', self.conn)
tm.assert_series_equal(result['foo'], df['foo'].astype('int64'))
def test_complex(self):
df = DataFrame({'a': [1 + 1j, 2j]})
# Complex data type should raise error
pytest.raises(ValueError, df.to_sql, 'test_complex', self.conn)
def test_to_sql_index_label(self):
temp_frame = DataFrame({'col1': range(4)})
# no index name, defaults to 'index'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == 'index'
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == "other_label"
# using the index name
temp_frame.index.name = 'index_name'
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == "index_name"
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == "other_label"
# index name is integer
temp_frame.index.name = 0
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == "0"
temp_frame.index.name = None
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=0)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == "0"
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame({'col1': range(4)},
index=MultiIndex.from_product(
[('A0', 'A1'), ('B0', 'B1')]))
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == 'level_0'
assert frame.columns[1] == 'level_1'
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['A', 'B'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[:2].tolist() == ['A', 'B']
# using the index name
temp_frame.index.names = ['A', 'B']
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[:2].tolist() == ['A', 'B']
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['C', 'D'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[:2].tolist() == ['C', 'D']
# wrong length of index_label
pytest.raises(ValueError, sql.to_sql, temp_frame,
'test_index_label', self.conn, if_exists='replace',
index_label='C')
def test_multiindex_roundtrip(self):
df = DataFrame.from_records([(1, 2.1, 'line1'), (2, 1.5, 'line2')],
columns=['A', 'B', 'C'], index=['A', 'B'])
df.to_sql('test_multiindex_roundtrip', self.conn)
result = sql.read_sql_query('SELECT * FROM test_multiindex_roundtrip',
self.conn, index_col=['A', 'B'])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn,
if_exists='replace')
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, 'test', con=self.conn)
assert 'CREATE' in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({'a': [1.1, 1.2], 'b': [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == 'sqlalchemy' else 'INTEGER'
create_sql = sql.get_schema(float_frame, 'test',
con=self.conn, dtype={'b': dtype})
assert 'CREATE' in create_sql
assert 'INTEGER' in create_sql
def test_get_schema_keys(self):
frame = DataFrame({'Col1': [1.1, 1.2], 'Col2': [2.1, 2.2]})
create_sql = sql.get_schema(frame, 'test', con=self.conn, keys='Col1')
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(self.test_frame1, 'test',
con=self.conn, keys=['A', 'B'])
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list('abcde'))
df.to_sql('test_chunksize', self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query("select * from test_chunksize",
self.conn, chunksize=5):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
# reading the table in chunks with read_sql_table
if self.mode == 'sqlalchemy':
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn,
chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{'person_id': [1, 2, 3],
'person_name': ['John P. Doe', 'Jane Dove', 'John P. Doe']})
df2 = df.copy()
df2['person_name'] = df2['person_name'].astype('category')
df2.to_sql('test_categorical', self.conn, index=False)
res = sql.read_sql_query('SELECT * FROM test_categorical', self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=[u'\xe9', u'b'])
df.to_sql('test_unicode', self.conn, index=False)
@pytest.mark.single
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as these are specific to the
sqlalchemy mode.
"""
flavor = 'sqlite'
mode = 'sqlalchemy'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
pytest.skip('SQLAlchemy not installed')
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
cols = ['A', 'B']
result = sql.read_sql_table('test_frame', self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test index_col argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
result = sql.read_sql_table('test_frame', self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table(
'test_frame', self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table('test_frame', self.conn,
index_col=["A", "B"],
columns=["C", "D"])
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql(
"SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table('iris', self.conn)
iris_frame2 = sql.read_sql('iris', self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table('other_table', self.conn)
sql.read_sql_query('SELECT * FROM other_table', self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
# We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql('CaseSensitive', self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes('test_index_saved')
ixs = [i['column_names'] for i in ixs]
return ixs
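# Note (descriptive comment, not in the original): the tbl_name argument is
# effectively ignored above; the inspector always looks up the hardcoded
# 'test_index_saved' table, so this helper only supports a test that creates
# a table with that exact name.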
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],
utc=True)})
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
assert isinstance(table.table.c['time'].type, sqltypes.DateTime)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = 'sqlite:///' + name
table = 'iris'
test_frame1.to_sql(table, db_uri, if_exists='replace', index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = 'SELECT * FROM iris'
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with tm.assert_raises_regex(ImportError, "pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table('iris', metadata,
sa.Column('SepalLength', sa.REAL),
sa.Column('SepalWidth', sa.REAL),
sa.Column('PetalLength', sa.REAL),
sa.Column('PetalWidth', sa.REAL),
sa.Column('Name', sa.TEXT)
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text('select * from iris where name=:name')
iris_df = sql.read_sql(name_text, self.conn, params={
'name': 'Iris-versicolor'})
all_names = set(iris_df['Name'])
assert all_names == set(['Iris-versicolor'])
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam('name'))
iris_df = sql.read_sql(name_select, self.conn,
params={'name': 'Iris-setosa'})
all_names = set(iris_df['Name'])
assert all_names == set(['Iris-setosa'])
class _EngineToConnMixin(object):
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
def setup_method(self, method):
super(_EngineToConnMixin, self).setup_method(method)
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
def teardown_method(self, method):
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
super(_EngineToConnMixin, self).teardown_method(method)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = 'sqlite'
mode = 'fallback'
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test if the IO to the database still works if the connection is closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy",
conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;",
conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
def test_con_string_import_error(self):
if not SQLALCHEMY_INSTALLED:
conn = 'mysql://root@localhost/pandas_nosetest'
pytest.raises(ImportError, sql.read_sql, "SELECT * FROM iris",
conn)
else:
pytest.skip('SQLAlchemy is installed')
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
pytest.raises(sql.DatabaseError, sql.read_sql, 'iris', self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b ']) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(self.test_frame1, 'test')
assert 'CREATE' in create_sql
def _get_sqlite_column_type(self, schema, column):
for col in schema.split('\n'):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError('Column %s not found' % (column))
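# Illustrative example (assumed schema format, not part of the original
# tests): for a schema line such as '"time" TIMESTAMP,' the first
# whitespace-separated token with quotes stripped is the column name and the
# second token is the declared SQL type returned above.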
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],
utc=True)})
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
assert self._get_sqlite_column_type(schema, 'time') == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor = None
@classmethod
def setup_class(cls):
cls.setup_import()
cls.setup_driver()
# test connection
try:
conn = cls.connect()
conn.connect()
except sqlalchemy.exc.OperationalError:
msg = "{0} - can't connect to {1} server".format(cls, cls.flavor)
pytest.skip(msg)
def setup_method(self, method):
self.setup_connect()
self._load_iris_data()
self._load_raw_sql()
self._load_test1_data()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
pytest.skip('SQLAlchemy not installed')
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip(
"Can't connect to {0} server".format(self.flavor))
def test_aread_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
assert temp_conn.has_table('temp_frame')
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
assert temp_conn.has_table('temp_frame')
pandasSQL.drop_table('temp_frame')
assert not temp_conn.has_table('temp_frame')
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=['SepalLength', 'SepalLength'])
tm.equalContents(
iris_frame.columns.values, ['SepalLength', 'SepalLength'])
def test_read_table_absent(self):
pytest.raises(
ValueError, sql.read_sql_table, "this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
assert issubclass(df.BoolColWithNull.dtype.type, np.object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={'i64': [2**62]})
df.to_sql('test_bigint', self.conn, index=False)
result = sql.read_sql_table('test_bigint', self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
assert issubclass(df.DateCol.dtype.type, np.datetime64)
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
assert col[0] == Timestamp('2000-01-01 08:00:00')
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
assert col[1] == Timestamp('2000-06-01 07:00:00')
elif is_datetime64tz_dtype(col.dtype):
assert str(col.dt.tz) == 'UTC'
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
assert col[0] == Timestamp('2000-01-01 08:00:00', tz='UTC')
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
assert col[1] == Timestamp('2000-06-01 07:00:00', tz='UTC')
else:
raise AssertionError("DateCol loaded with incorrect type "
"-> {0}".format(col.dtype))
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df, 'DateColWithTz'):
pytest.skip("no column with datetime with time zone")
# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a
# PostgreSQL server version difference
col = df.DateColWithTz
assert (is_object_dtype(col.dtype) or
is_datetime64_dtype(col.dtype) or
is_datetime64tz_dtype(col.dtype))
df = pd.read_sql_query("select * from types_test_data",
self.conn, parse_dates=['DateColWithTz'])
if not hasattr(df, 'DateColWithTz'):
pytest.skip("no column with datetime with time zone")
check(df.DateColWithTz)
df = pd.concat(list(pd.read_sql_query("select * from types_test_data",
self.conn, chunksize=1)),
ignore_index=True)
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == 'UTC'
expected = sql.read_sql_table("types_test_data", self.conn)
tm.assert_series_equal(df.DateColWithTz,
expected.DateColWithTz
.astype('datetime64[ns, UTC]'))
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates=['DateCol'])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table("types_test_data", self.conn, parse_dates={
'DateCol': {'format': '%Y-%m-%d %H:%M:%S'}})
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=['IntDateCol'])
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': 's'})
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates={'IntDateCol': {'unit': 's'}})
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_datetime(self):
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.to_sql('test_datetime', self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table('test_datetime', self.conn)
result = result.drop('index', axis=1)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
result = result.drop('index', axis=1)
if self.flavor == 'sqlite':
assert isinstance(result.loc[0, 'A'], string_types)
result['A'] = to_datetime(result['A'])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.loc[1, 'A'] = np.nan
df.to_sql('test_datetime', self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table('test_datetime', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
if self.flavor == 'sqlite':
assert isinstance(result.loc[0, 'A'], string_types)
result['A'] = to_datetime(result['A'], errors='coerce')
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql('test_date', self.conn, index=False)
res = read_sql_table('test_date', self.conn)
# comes back as datetime64
tm.assert_series_equal(res['a'], to_datetime(df['a']))
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql('test_time', self.conn, index=False)
res = read_sql_table('test_time', self.conn)
tm.assert_frame_equal(res, df)
# GH8341
# first, use the fallback to have the sqlite adapter put in place
sqlite_conn = TestSQLiteFallback.connect()
sql.to_sql(df, "test_time2", sqlite_conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res) # check if adapter is in place
# then test if sqlalchemy is unaffected by the sqlite adapter
sql.to_sql(df, "test_time3", self.conn, index=False)
if self.flavor == 'sqlite':
res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res)
res = sql.read_sql_table("test_time3", self.conn)
tm.assert_frame_equal(df, res)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2**25 + 1, dtype=np.int32)
s2 = Series(0.0, dtype=np.float32)
df = DataFrame({'s1': s1, 's2': s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({'A': [0, 1, 2], 'B': [0.2, np.nan, 5.6]})
df.to_sql('test_nan', self.conn, index=False)
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({'A': [0, 1, 2], 'B': [np.nan, np.nan, np.nan]})
df.to_sql('test_nan', self.conn, index=False)
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type info from table -> stays None
df['B'] = df['B'].astype('object')
df['B'] = None
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({'A': [0, 1, 2], 'B': ['a', 'b', np.nan]})
df.to_sql('test_nan', self.conn, index=False)
# NaNs are coming back as None
df.loc[2, 'B'] = None
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i['column_names'] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = 'test_get_schema_create_table'
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df,
check_index_type=False)
self.drop_table(tbl)
def test_dtype(self):
cols = ['A', 'B']
data = [(0.8, True),
(0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql('dtype_test', self.conn)
df.to_sql('dtype_test2', self.conn, dtype={'B': sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables['dtype_test2'].columns['B'].type
assert isinstance(sqltype, sqlalchemy.TEXT)
pytest.raises(ValueError, df.to_sql,
'error', self.conn, dtype={'B': str})
# GH9083
df.to_sql('dtype_test3', self.conn, dtype={'B': sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables['dtype_test3'].columns['B'].type
assert isinstance(sqltype, sqlalchemy.String)
assert sqltype.length == 10
# single dtype
df.to_sql('single_dtype_test', self.conn, dtype=sqlalchemy.TEXT)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltypea = meta.tables['single_dtype_test'].columns['A'].type
sqltypeb = meta.tables['single_dtype_test'].columns['B'].type
assert isinstance(sqltypea, sqlalchemy.TEXT)
assert isinstance(sqltypeb, sqlalchemy.TEXT)
def test_notnull_dtype(self):
cols = {'Bool': Series([True, None]),
'Date': Series([datetime(2012, 5, 1), None]),
'Int': Series([1, None], dtype='object'),
'Float': Series([1.1, None])
}
df = DataFrame(cols)
tbl = 'notnull_dtype_test'
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn) # noqa
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == 'mysql':
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
assert isinstance(col_dict['Bool'].type, my_type)
assert isinstance(col_dict['Date'].type, sqltypes.DateTime)
assert isinstance(col_dict['Int'].type, sqltypes.Integer)
assert isinstance(col_dict['Float'].type, sqltypes.Float)
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame({'f32': Series([V, ], dtype='float32'),
'f64': Series([V, ], dtype='float64'),
'f64_as_f32': Series([V, ], dtype='float64'),
'i32': Series([5, ], dtype='int32'),
'i64': Series([5, ], dtype='int64'),
})
df.to_sql('test_dtypes', self.conn, index=False, if_exists='replace',
dtype={'f64_as_f32': sqlalchemy.Float(precision=23)})
res = sql.read_sql_table('test_dtypes', self.conn)
# check precision of float64
assert (np.round(df['f64'].iloc[0], 14) ==
np.round(res['f64'].iloc[0], 14))
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables['test_dtypes'].columns
assert str(col_dict['f32'].type) == str(col_dict['f64_as_f32'].type)
assert isinstance(col_dict['f32'].type, sqltypes.Float)
assert isinstance(col_dict['f64'].type, sqltypes.Float)
assert isinstance(col_dict['i32'].type, sqltypes.Integer)
assert isinstance(col_dict['i64'].type, sqltypes.BigInteger)
def test_connectable_issue_example(self):
# This tests the example raised in issue
# https://github.com/pandas-dev/pandas/issues/10104
def foo(connection):
query = 'SELECT test_foo_data FROM test_foo_data'
return sql.read_sql_query(query, con=connection)
def bar(connection, data):
data.to_sql(name='test_foo_data',
con=connection, if_exists='append')
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
DataFrame({'test_foo_data': [0, 1, 2]}).to_sql(
'test_foo_data', self.conn)
main(self.conn)
def test_temporary_table(self):
test_data = u'Hello, World!'
expected = DataFrame({'spam': [test_data]})
Base = declarative.declarative_base()
class Temporary(Base):
__tablename__ = 'temp_test'
__table_args__ = {'prefixes': ['TEMPORARY']}
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
Session = sa_session.sessionmaker(bind=self.conn)
session = Session()
with session.transaction:
conn = session.connection()
Temporary.__table__.create(conn)
session.add(Temporary(spam=test_data))
session.flush()
df = sql.read_sql_query(
sql=sqlalchemy.select([Temporary.spam]),
con=conn,
)
tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
def test_transactions(self):
pytest.skip(
"Nested transactions rollbacks don't work with Pandas")
class _TestSQLiteAlchemy(object):
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = 'sqlite'
@classmethod
def connect(cls):
return sqlalchemy.create_engine('sqlite:///:memory:')
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# sqlite has no boolean type, so integer type is returned
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Non-native Bool column with NA values stays as float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so it shouldn't be parsed
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
def test_bigint_warning(self):
# test no warning for BIGINT (to support int64) is raised (GH7433)
df = DataFrame({'a': [1, 2]}, dtype='int64')
df.to_sql('test_bigintwarning', self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table('test_bigintwarning', self.conn)
assert len(w) == 0
class _TestMySQLAlchemy(object):
"""
Test the sqlalchemy backend against a MySQL database.
"""
flavor = 'mysql'
@classmethod
def connect(cls):
url = 'mysql+{driver}://root@localhost/pandas_nosetest'
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
try:
import pymysql # noqa
cls.driver = 'pymysql'
except ImportError:
pytest.skip('pymysql not installed')
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# MySQL has no real BOOL type (it's an alias for TINYINT)
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA = int column with NA values => becomes float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_read_procedure(self):
# see GH7324. Although it is more of an API test, it is added to the
# mysql tests as sqlite does not have stored procedures
df = DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})
df.to_sql('test_procedure', self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc) # noqa
trans.commit()
except:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy(object):
"""
Test the sqlalchemy backend against a PostgreSQL database.
"""
flavor = 'postgresql'
@classmethod
def connect(cls):
url = 'postgresql+{driver}://postgres@localhost/pandas_nosetest'
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
try:
import psycopg2 # noqa
cls.driver = 'psycopg2'
except ImportError:
pytest.skip('psycopg2 not installed')
def test_schema_support(self):
# only test this for postgresql (schemas are not supported in
# mysql/sqlite)
df = DataFrame({'col1': [1, 2], 'col2': [
0.1, 0.2], 'col3': ['a', 'n']})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe to different schemas
df.to_sql('test_schema_public', self.conn, index=False)
df.to_sql('test_schema_public_explicit', self.conn, index=False,
schema='public')
df.to_sql('test_schema_other', self.conn, index=False, schema='other')
# read dataframes back in
res1 = sql.read_sql_table('test_schema_public', self.conn)
tm.assert_frame_equal(df, res1)
res2 = sql.read_sql_table('test_schema_public_explicit', self.conn)
tm.assert_frame_equal(df, res2)
res3 = sql.read_sql_table('test_schema_public_explicit', self.conn,
schema='public')
tm.assert_frame_equal(df, res3)
res4 = sql.read_sql_table('test_schema_other', self.conn,
schema='other')
tm.assert_frame_equal(df, res4)
pytest.raises(ValueError, sql.read_sql_table, 'test_schema_other',
self.conn, schema='public')
# different if_exists options
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe with different if_exists options
df.to_sql('test_schema_other', self.conn, schema='other', index=False)
df.to_sql('test_schema_other', self.conn, schema='other', index=False,
if_exists='replace')
df.to_sql('test_schema_other', self.conn, schema='other', index=False,
if_exists='append')
res = sql.read_sql_table(
'test_schema_other', self.conn, schema='other')
tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
# specifying schema in user-provided meta
# The schema won't be applied on another Connection
# because of transactional schemas
if isinstance(self.conn, sqlalchemy.engine.Engine):
engine2 = self.connect()
meta = sqlalchemy.MetaData(engine2, schema='other')
pdsql = sql.SQLDatabase(engine2, meta=meta)
pdsql.to_sql(df, 'test_schema_other2', index=False)
pdsql.to_sql(df, 'test_schema_other2',
index=False, if_exists='replace')
pdsql.to_sql(df, 'test_schema_other2',
index=False, if_exists='append')
res1 = sql.read_sql_table(
'test_schema_other2', self.conn, schema='other')
res2 = pdsql.read_table('test_schema_other2')
tm.assert_frame_equal(res1, res2)
@pytest.mark.single
class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
class TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
class TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
class TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
class TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn):
pass
# -----------------------------------------------------------------------------
# -- Test Sqlite / MySQL fallback
@pytest.mark.single
class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest):
"""
Test the fallback mode against an in-memory sqlite database.
"""
flavor = 'sqlite'
@classmethod
def connect(cls):
return sqlite3.connect(':memory:')
def setup_method(self, method):
self.conn = self.connect()
self.pandasSQL = sql.SQLiteDatabase(self.conn)
self._load_iris_data()
self._load_test1_data()
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_create_and_drop_table(self):
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
self.pandasSQL.to_sql(temp_frame, 'drop_test_frame')
assert self.pandasSQL.has_table('drop_test_frame')
self.pandasSQL.drop_table('drop_test_frame')
assert not self.pandasSQL.has_table('drop_test_frame')
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql('test_date', self.conn, index=False)
res = read_sql_query('SELECT * FROM test_date', self.conn)
if self.flavor == 'sqlite':
# comes back as strings
tm.assert_frame_equal(res, df.astype(str))
elif self.flavor == 'mysql':
tm.assert_frame_equal(res, df)
def test_datetime_time(self):
# test support for datetime.time, GH #8341
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql('test_time', self.conn, index=False)
res = read_sql_query('SELECT * FROM test_time', self.conn)
if self.flavor == 'sqlite':
# comes back as strings
expected = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(res, expected)
def _get_index_columns(self, tbl_name):
ixs = sql.read_sql_query(
"SELECT * FROM sqlite_master WHERE type = 'index' " +
"AND tbl_name = '%s'" % tbl_name, self.conn)
ix_cols = []
for ix_name in ixs.name:
ix_info = sql.read_sql_query(
"PRAGMA index_info(%s)" % ix_name, self.conn)
ix_cols.append(ix_info.name.tolist())
return ix_cols
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
if PY36:
pytest.skip("not working on python > 3.5")
self._transaction_test()
def _get_sqlite_column_type(self, table, column):
recs = self.conn.execute('PRAGMA table_info(%s)' % table)
for cid, name, ctype, not_null, default, pk in recs:
if name == column:
return ctype
raise ValueError('Table %s, column %s not found' % (table, column))
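# Descriptive note (not in the original): PRAGMA table_info returns one row
# per column in the form (cid, name, type, notnull, dflt_value, pk), which is
# what the tuple unpacking above relies on.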
def test_dtype(self):
if self.flavor == 'mysql':
pytest.skip('Not applicable to MySQL legacy')
cols = ['A', 'B']
data = [(0.8, True),
(0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql('dtype_test', self.conn)
df.to_sql('dtype_test2', self.conn, dtype={'B': 'STRING'})
# sqlite stores Boolean values as INTEGER
assert self._get_sqlite_column_type(
'dtype_test', 'B') == 'INTEGER'
assert self._get_sqlite_column_type(
'dtype_test2', 'B') == 'STRING'
pytest.raises(ValueError, df.to_sql,
'error', self.conn, dtype={'B': bool})
# single dtype
df.to_sql('single_dtype_test', self.conn, dtype='STRING')
assert self._get_sqlite_column_type(
'single_dtype_test', 'A') == 'STRING'
assert self._get_sqlite_column_type(
'single_dtype_test', 'B') == 'STRING'
def test_notnull_dtype(self):
if self.flavor == 'mysql':
pytest.skip('Not applicable to MySQL legacy')
cols = {'Bool': Series([True, None]),
'Date': Series([datetime(2012, 5, 1), None]),
'Int': Series([1, None], dtype='object'),
'Float': Series([1.1, None])
}
df = DataFrame(cols)
tbl = 'notnull_dtype_test'
df.to_sql(tbl, self.conn)
assert self._get_sqlite_column_type(tbl, 'Bool') == 'INTEGER'
assert self._get_sqlite_column_type(tbl, 'Date') == 'TIMESTAMP'
assert self._get_sqlite_column_type(tbl, 'Int') == 'INTEGER'
assert self._get_sqlite_column_type(tbl, 'Float') == 'REAL'
def test_illegal_names(self):
# For sqlite, these should work fine
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
# Raise error on blank
pytest.raises(ValueError, df.to_sql, "", self.conn)
for ndx, weird_name in enumerate(
['test_weird_name]', 'test_weird_name[',
'test_weird_name`', 'test_weird_name"', 'test_weird_name\'',
'_b.test_weird_name_01-30', '"_b.test_weird_name_01-30"',
'99beginswithnumber', '12345', u'\xe9']):
df.to_sql(weird_name, self.conn)
sql.table_exists(weird_name, self.conn)
df2 = DataFrame([[1, 2], [3, 4]], columns=['a', weird_name])
c_tbl = 'test_weird_col_name%d' % ndx
df2.to_sql(c_tbl, self.conn)
sql.table_exists(c_tbl, self.conn)
# -----------------------------------------------------------------------------
# -- Old tests from 0.13.1 (before refactor using sqlalchemy)
_formatters = {
datetime: lambda dt: "'%s'" % date_format(dt),
str: lambda x: "'%s'" % x,
np.str_: lambda x: "'%s'" % x,
compat.text_type: lambda x: "'%s'" % x,
compat.binary_type: lambda x: "'%s'" % x,
float: lambda x: "%.8f" % x,
int: lambda x: "%s" % x,
type(None): lambda x: "NULL",
np.float64: lambda x: "%.10f" % x,
bool: lambda x: "'%s'" % x,
}
def format_query(sql, *args):
"""
"""
processed_args = []
for arg in args:
if isinstance(arg, float) and isnull(arg):
arg = None
formatter = _formatters[type(arg)]
processed_args.append(formatter(arg))
return sql % tuple(processed_args)
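# Illustrative example (not part of the original tests): with the
# _formatters above,
#     format_query("INSERT INTO test VALUES (%s, %s)", 1.5, None)
# would yield "INSERT INTO test VALUES (1.50000000, NULL)".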
def tquery(query, con=None, cur=None):
"""Replace removed sql.tquery function"""
res = sql.execute(query, con=con, cur=cur).fetchall()
if res is None:
return None
else:
return list(res)
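# Illustrative example (not part of the original tests): given any DBAPI
# connection `conn`,
#     tquery("SELECT * FROM iris", con=conn)
# returns the result set as a list of row tuples, e.g.
# [(5.1, 3.5, 1.4, 0.2, 'Iris-setosa'), ...].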
def _skip_if_no_pymysql():
try:
import pymysql # noqa
except ImportError:
pytest.skip('pymysql not installed, skipping')
@pytest.mark.single
class TestXSQLite(SQLiteMixIn):
def setup_method(self, method):
self.method = method
self.conn = sqlite3.connect(':memory:')
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.iloc[0, 0] = np.nan
create_sql = sql.get_schema(frame, 'test')
cur = self.conn.cursor()
cur.execute(create_sql)
cur = self.conn.cursor()
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_sql("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame)
def test_execute(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test')
cur = self.conn.cursor()
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (?, ?, ?, ?)"
row = frame.iloc[0]
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_sql("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test')
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
assert tokens[1] == 'DATETIME'
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', keys=['A', 'B'])
lines = create_sql.splitlines()
assert 'PRIMARY KEY ("A", "B")' in create_sql
cur = self.conn.cursor()
cur.execute(create_sql)
@tm.capture_stdout
def test_execute_fail(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
with pytest.raises(Exception):
sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
@tm.capture_stdout
def test_execute_closed_connection(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
with pytest.raises(Exception):
tquery("select * from test", con=self.conn)
# Initialize connection again (needed for tearDown)
self.setup_method(self.method)
def test_na_roundtrip(self):
pass
def _check_roundtrip(self, frame):
sql.to_sql(frame, name='test_table', con=self.conn, index=False)
result = sql.read_sql("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
expected = frame
tm.assert_frame_equal(result, expected)
frame['txt'] = ['a'] * len(frame)
frame2 = frame.copy()
frame2['Idx'] = Index(lrange(len(frame2))) + 10
sql.to_sql(frame2, name='test_table2', con=self.conn, index=False)
result = sql.read_sql("select * from test_table2", self.conn,
index_col='Idx')
expected = frame.copy()
expected.index = Index(lrange(len(frame2))) + 10
expected.index.name = 'Idx'
tm.assert_frame_equal(expected, result)
def test_keyword_as_column_names(self):
df = DataFrame({'From': np.ones(5)})
sql.to_sql(df, con=self.conn, name='testkeywords', index=False)
def test_onecolumn_of_integer(self):
# GH 3628
# a column_of_integers dataframe should transfer well to sql
mono_df = DataFrame([1, 2], columns=['c0'])
sql.to_sql(mono_df, con=self.conn, name='mono_df', index=False)
# computing the sum via sql
con_x = self.conn
the_sum = sum([my_c0[0]
for my_c0 in con_x.execute("select * from mono_df")])
# it should not fail and gives 3 (GH 3628)
assert the_sum == 3
result = sql.read_sql("select * from mono_df", con_x)
tm.assert_frame_equal(result, mono_df)
def test_if_exists(self):
df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
df_if_exists_2 = DataFrame(
{'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
table_name = 'table_if_exists'
sql_select = "SELECT * FROM %s" % table_name
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
self.drop_table(test_table_to_drop)
# test if invalid value for if_exists raises appropriate error
pytest.raises(ValueError,
sql.to_sql,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.to_sql(frame=df_if_exists_1, con=self.conn,
name=table_name, if_exists='fail')
pytest.raises(ValueError,
sql.to_sql,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists='fail')
# test if_exists='replace'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='replace', index=False)
assert tquery(sql_select, con=self.conn) == [(1, 'A'), (2, 'B')]
sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,
if_exists='replace', index=False)
assert (tquery(sql_select, con=self.conn) ==
[(3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
# test if_exists='append'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='fail', index=False)
assert tquery(sql_select, con=self.conn) == [(1, 'A'), (2, 'B')]
sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,
if_exists='append', index=False)
assert (tquery(sql_select, con=self.conn) ==
[(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
@pytest.mark.single
class TestSQLFlavorDeprecation(object):
"""
gh-13611: test that the 'flavor' parameter
is appropriately deprecated by checking the
functions that directly raise the warning
"""
con = 1234 # don't need real connection for this
funcs = ['SQLiteDatabase', 'pandasSQL_builder']
def test_unsupported_flavor(self):
msg = 'is not supported'
for func in self.funcs:
tm.assert_raises_regex(ValueError, msg, getattr(sql, func),
self.con, flavor='mysql')
def test_deprecated_flavor(self):
for func in self.funcs:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
getattr(sql, func)(self.con, flavor='sqlite')
@pytest.mark.single
@pytest.mark.skip(reason="gh-13611: there is no support for MySQL "
"if SQLAlchemy is not installed")
class TestXMySQL(MySQLMixIn):
@classmethod
def setup_class(cls):
_skip_if_no_pymysql()
# test connection
import pymysql
try:
# Try Travis defaults.
# No real user should allow root access with a blank password.
pymysql.connect(host='localhost', user='root', passwd='',
db='pandas_nosetest')
except:
pass
else:
return
try:
pymysql.connect(read_default_group='pandas')
except pymysql.ProgrammingError:
pytest.skip(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
except pymysql.Error:
pytest.skip(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
def setup_method(self, method):
_skip_if_no_pymysql()
import pymysql
try:
# Try Travis defaults.
# No real user should allow root access with a blank password.
self.conn = pymysql.connect(host='localhost', user='root',
passwd='', db='pandas_nosetest')
except:
pass
else:
return
try:
self.conn = pymysql.connect(read_default_group='pandas')
except pymysql.ProgrammingError:
pytest.skip(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
except pymysql.Error:
pytest.skip(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
self.method = method
def test_basic(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
frame.iloc[0, 0] = np.nan
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test')
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_sql("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame)
def test_chunksize_read_type(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
frame.index.name = "index"
drop_sql = "DROP TABLE IF EXISTS test"
cur = self.conn.cursor()
cur.execute(drop_sql)
sql.to_sql(frame, name='test', con=self.conn)
query = "select * from test"
chunksize = 5
chunk_gen = pd.read_sql_query(sql=query, con=self.conn,
chunksize=chunksize, index_col="index")
chunk_df = next(chunk_gen)
tm.assert_frame_equal(frame[:chunksize], chunk_df)
def test_execute(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test')
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
row = frame.iloc[0].values.tolist()
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_sql("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test')
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
assert tokens[1] == 'DATETIME'
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', keys=['A', 'B'])
lines = create_sql.splitlines()
assert 'PRIMARY KEY (`A`, `B`)' in create_sql
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
@tm.capture_stdout
def test_execute_fail(self):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
with pytest.raises(Exception):
sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
@tm.capture_stdout
def test_execute_closed_connection(self):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
with pytest.raises(Exception):
tquery("select * from test", con=self.conn)
# Initialize connection again (needed for tearDown)
self.setup_method(self.method)
def test_na_roundtrip(self):
_skip_if_no_pymysql()
pass
def _check_roundtrip(self, frame):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.to_sql(frame, name='test_table', con=self.conn, index=False)
result = sql.read_sql("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
result.index.name = frame.index.name
expected = frame
tm.assert_frame_equal(result, expected)
frame['txt'] = ['a'] * len(frame)
frame2 = frame.copy()
index = Index(lrange(len(frame2))) + 10
frame2['Idx'] = index
drop_sql = "DROP TABLE IF EXISTS test_table2"
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.to_sql(frame2, name='test_table2',
con=self.conn, index=False)
result = sql.read_sql("select * from test_table2", self.conn,
index_col='Idx')
expected = frame.copy()
# HACK! Change this once indexes are handled properly.
expected.index = index
expected.index.names = result.index.names
tm.assert_frame_equal(expected, result)
def test_keyword_as_column_names(self):
_skip_if_no_pymysql()
df = DataFrame({'From': np.ones(5)})
sql.to_sql(df, con=self.conn, name='testkeywords',
if_exists='replace', index=False)
def test_if_exists(self):
_skip_if_no_pymysql()
df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
df_if_exists_2 = DataFrame(
{'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
table_name = 'table_if_exists'
sql_select = "SELECT * FROM %s" % table_name
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
self.drop_table(test_table_to_drop)
# test if invalid value for if_exists raises appropriate error
pytest.raises(ValueError,
sql.to_sql,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='fail', index=False)
pytest.raises(ValueError,
sql.to_sql,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists='fail')
# test if_exists='replace'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='replace', index=False)
assert tquery(sql_select, con=self.conn) == [(1, 'A'), (2, 'B')]
sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,
if_exists='replace', index=False)
assert (tquery(sql_select, con=self.conn) ==
[(3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
# test if_exists='append'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='fail', index=False)
assert tquery(sql_select, con=self.conn) == [(1, 'A'), (2, 'B')]
sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,
if_exists='append', index=False)
assert (tquery(sql_select, con=self.conn) ==
[(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
| mit |
dereknewman/cancer_detection | step4_train_submissions.py | 2 | 18086 | import settings
import helpers
import sys
import os
from collections import defaultdict
import glob
import random
import pandas
import ntpath
import numpy
from sklearn import cross_validation
import xgboost
from sklearn.metrics import log_loss
def combine_nodule_predictions(dirs, train_set=True, nodule_th=0.5, extensions=[""]):
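    # Rough summary: for every patient in the labels / sample-submission file,
    # merge the mass-detector prediction with per-magnification nodule detector
    # CSVs (largest nodule diameter, its chance, nodule count, median diameter,
    # coordinates, second largest) into one feature row, then write all rows to
    # an xgboost train or submission CSV under xgboost_trainsets/.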
print("Combining nodule predictions: ", "Train" if train_set else "Submission")
if train_set:
labels_df = pandas.read_csv("resources/stage1_labels.csv")
else:
labels_df = pandas.read_csv("resources/stage2_sample_submission.csv")
mass_df = pandas.read_csv(settings.BASE_DIR + "masses_predictions.csv")
mass_df.set_index(["patient_id"], inplace=True)
# meta_df = pandas.read_csv(settings.BASE_DIR + "patient_metadata.csv")
# meta_df.set_index(["patient_id"], inplace=True)
data_rows = []
for index, row in labels_df.iterrows():
patient_id = row["id"]
# mask = helpers.load_patient_images(patient_id, settings.EXTRACTED_IMAGE_DIR, "*_m.png")
print(len(data_rows), " : ", patient_id)
# if len(data_rows) > 19:
# break
cancer_label = row["cancer"]
mass_pred = int(mass_df.loc[patient_id]["prediction"])
# meta_row = meta_df.loc[patient_id]
# z_scale = meta_row["slice_thickness"]
# x_scale = meta_row["spacingx"]
# vendor_low = 1 if "1.2.276.0.28.3.145667764438817.42.13928" in meta_row["instance_id"] else 0
# vendor_high = 1 if "1.3.6.1.4.1.14519.5.2.1.3983.1600" in meta_row["instance_id"] else 0
# row_items = [cancer_label, 0, mass_pred, x_scale, z_scale, vendor_low, vendor_high] # mask.sum()
row_items = [cancer_label, 0, mass_pred] # mask.sum()
for magnification in [1, 1.5, 2]:
pred_df_list = []
for extension in extensions:
src_dir = settings.NDSB3_NODULE_DETECTION_DIR + "predictions" + str(int(magnification * 10)) + extension + "/"
pred_nodules_df = pandas.read_csv(src_dir + patient_id + ".csv")
pred_nodules_df = pred_nodules_df[pred_nodules_df["diameter_mm"] > 0]
pred_nodules_df = pred_nodules_df[pred_nodules_df["nodule_chance"] > nodule_th]
pred_df_list.append(pred_nodules_df)
pred_nodules_df = pandas.concat(pred_df_list, ignore_index=True)
nodule_count = len(pred_nodules_df)
nodule_max = 0
nodule_median = 0
nodule_chance = 0
nodule_sum = 0
coord_z = 0
second_largest = 0
nodule_wmax = 0
count_rows = []
coord_y = 0
coord_x = 0
if len(pred_nodules_df) > 0:
                max_index = pred_nodules_df["diameter_mm"].argmax()
max_row = pred_nodules_df.loc[max_index]
nodule_max = round(max_row["diameter_mm"], 2)
nodule_chance = round(max_row["nodule_chance"], 2)
nodule_median = round(pred_nodules_df["diameter_mm"].median(), 2)
nodule_wmax = round(nodule_max * nodule_chance, 2)
coord_z = max_row["coord_z"]
coord_y = max_row["coord_y"]
coord_x = max_row["coord_x"]
rows = []
for row_index, row in pred_nodules_df.iterrows():
dist = helpers.get_distance(max_row, row)
if dist > 0.2:
nodule_mal = row["diameter_mm"]
if nodule_mal > second_largest:
second_largest = nodule_mal
rows.append(row)
count_rows = []
for row in rows:
ok = True
for count_row in count_rows:
dist = helpers.get_distance(count_row, row)
if dist < 0.2:
ok = False
if ok:
count_rows.append(row)
nodule_count = len(count_rows)
row_items += [nodule_max, nodule_chance, nodule_count, nodule_median, nodule_wmax, coord_z, second_largest, coord_y, coord_x]
row_items.append(patient_id)
data_rows.append(row_items)
# , "x_scale", "z_scale", "vendor_low", "vendor_high"
columns = ["cancer_label", "mask_size", "mass"]
for magnification in [1, 1.5, 2]:
str_mag = str(int(magnification * 10))
columns.append("mx_" + str_mag)
columns.append("ch_" + str_mag)
columns.append("cnt_" + str_mag)
columns.append("med_" + str_mag)
columns.append("wmx_" + str_mag)
columns.append("crdz_" + str_mag)
columns.append("mx2_" + str_mag)
columns.append("crdy_" + str_mag)
columns.append("crdx_" + str_mag)
columns.append("patient_id")
res_df = pandas.DataFrame(data_rows, columns=columns)
if not os.path.exists(settings.BASE_DIR + "xgboost_trainsets/"):
os.mkdir(settings.BASE_DIR + "xgboost_trainsets/")
target_path = settings.BASE_DIR + "xgboost_trainsets/" "train" + extension + ".csv" if train_set else settings.BASE_DIR + "xgboost_trainsets/" + "submission" + extension + ".csv"
res_df.to_csv(target_path, index=False)
def train_xgboost_on_combined_nodules_ensembletest(fixed_holdout=False, submission_is_fixed_holdout=False, ensemble_lists=[]):
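    # Rough summary: for each feature-set variant in the given ensemble lists,
    # repeatedly draw a random train/holdout split, fit an XGBoost regressor,
    # and report per-variant, per-ensemble and overall averaged holdout log loss.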
train_cols = ["mass", "mx_10", "mx_20", "mx_15", "crdz_10", "crdz_15", "crdz_20"]
runs = 5 if fixed_holdout else 1000
test_size = 0.1
record_count = 0
seed = random.randint(0, 500) if fixed_holdout else 4242
variants = []
x_variants = dict()
y_variants = dict()
for ensemble in ensemble_lists:
for variant in ensemble:
variants.append(variant)
df_train = pandas.read_csv(settings.BASE_DIR + "xgboost_trainsets/" + "train" + variant + ".csv")
y = df_train["cancer_label"].as_matrix()
y = y.reshape(y.shape[0], 1)
cols = df_train.columns.values.tolist()
cols.remove("cancer_label")
cols.remove("patient_id")
x = df_train[train_cols].as_matrix()
x_variants[variant] = x
record_count = len(x)
y_variants[variant] = y
scores = defaultdict(lambda: [])
ensemble_scores = []
for i in range(runs):
submission_preds_list = defaultdict(lambda: [])
train_preds_list = defaultdict(lambda: [])
holdout_preds_list = defaultdict(lambda: [])
train_test_mask = numpy.random.choice([True, False], record_count, p=[0.8, 0.2])
for variant in variants:
x = x_variants[variant]
y = y_variants[variant]
x_train = x[train_test_mask]
y_train = y[train_test_mask]
x_holdout = x[~train_test_mask]
y_holdout = y[~train_test_mask]
if fixed_holdout:
x_train = x[300:]
y_train = y[300:]
x_holdout = x[:300]
y_holdout = y[:300]
if True:
clf = xgboost.XGBRegressor(max_depth=4,
n_estimators=80, #50
learning_rate=0.05,
min_child_weight=60,
nthread=8,
subsample=0.95, #95
colsample_bytree=0.95, # 95
# subsample=1.00,
# colsample_bytree=1.00,
seed=seed)
#
clf.fit(x_train, y_train, verbose=fixed_holdout and False, eval_set=[(x_train, y_train), (x_holdout, y_holdout)], eval_metric="logloss", early_stopping_rounds=5, )
holdout_preds = clf.predict(x_holdout)
holdout_preds = numpy.clip(holdout_preds, 0.001, 0.999)
# holdout_preds *= 0.93
holdout_preds_list[variant].append(holdout_preds)
train_preds_list[variant].append(holdout_preds.mean())
score = log_loss(y_holdout, holdout_preds, normalize=True)
print(score, "\tbest:\t", clf.best_score, "\titer\t", clf.best_iteration, "\tmean:\t", train_preds_list[-1], "\thomean:\t", y_holdout.mean(), " variant:", variant)
scores[variant].append(score)
total_predictions = []
for ensemble in ensemble_lists:
ensemble_predictions = []
for variant in ensemble:
variant_predictions = numpy.array(holdout_preds_list[variant], dtype=numpy.float)
ensemble_predictions.append(variant_predictions.swapaxes(0, 1))
ensemble_predictions_np = numpy.hstack(ensemble_predictions)
ensemble_predictions_np = ensemble_predictions_np.mean(axis=1)
score = log_loss(y_holdout, ensemble_predictions_np, normalize=True)
print(score)
total_predictions.append(ensemble_predictions_np.reshape(ensemble_predictions_np.shape[0], 1))
total_predictions_np = numpy.hstack(total_predictions)
total_predictions_np = total_predictions_np.mean(axis=1)
score = log_loss(y_holdout, total_predictions_np, normalize=True)
print("Total: ", score)
ensemble_scores.append(score)
print("Average score: ", sum(ensemble_scores) / len(ensemble_scores))
def train_xgboost_on_combined_nodules(extension, fixed_holdout=False, submission=False, submission_is_fixed_holdout=False):
df_train = pandas.read_csv(settings.BASE_DIR + "xgboost_trainsets/" + "train" + extension + ".csv")
if submission:
df_submission = pandas.read_csv(settings.BASE_DIR + "xgboost_trainsets/" + "submission" + extension + ".csv")
submission_y = numpy.zeros((len(df_submission), 1))
if submission_is_fixed_holdout:
df_submission = df_train[:300]
df_train = df_train[300:]
submission_y = df_submission["cancer_label"].as_matrix()
submission_y = submission_y.reshape(submission_y.shape[0], 1)
y = df_train["cancer_label"].as_matrix()
y = y.reshape(y.shape[0], 1)
# print("Mean y: ", y.mean())
cols = df_train.columns.values.tolist()
cols.remove("cancer_label")
cols.remove("patient_id")
train_cols = ["mass", "mx_10", "mx_20", "mx_15", "crdz_10", "crdz_15", "crdz_20"]
x = df_train[train_cols].as_matrix()
if submission:
x_submission = df_submission[train_cols].as_matrix()
if submission_is_fixed_holdout:
x_submission = df_submission[train_cols].as_matrix()
runs = 20 if fixed_holdout else 1000
scores = []
submission_preds_list = []
train_preds_list = []
holdout_preds_list = []
for i in range(runs):
test_size = 0.1 if submission else 0.1
# stratify=y,
x_train, x_holdout, y_train, y_holdout = cross_validation.train_test_split(x, y, test_size=test_size)
# print(y_holdout.mean())
if fixed_holdout:
x_train = x[300:]
y_train = y[300:]
x_holdout = x[:300]
y_holdout = y[:300]
seed = random.randint(0, 500) if fixed_holdout else 4242
if True:
clf = xgboost.XGBRegressor(max_depth=4,
n_estimators=80, #55
learning_rate=0.05,
min_child_weight=60,
nthread=8,
subsample=0.95, #95
colsample_bytree=0.95, # 95
# subsample=1.00,
# colsample_bytree=1.00,
seed=seed)
#
clf.fit(x_train, y_train, verbose=fixed_holdout and False, eval_set=[(x_train, y_train), (x_holdout, y_holdout)], eval_metric="logloss", early_stopping_rounds=5, )
holdout_preds = clf.predict(x_holdout)
holdout_preds = numpy.clip(holdout_preds, 0.001, 0.999)
# holdout_preds *= 0.93
holdout_preds_list.append(holdout_preds)
train_preds_list.append(holdout_preds.mean())
score = log_loss(y_holdout, holdout_preds, normalize=True)
print(score, "\tbest:\t", clf.best_score, "\titer\t", clf.best_iteration, "\tmean:\t", train_preds_list[-1], "\thomean:\t", y_holdout.mean())
scores.append(score)
if submission_is_fixed_holdout:
submission_preds = clf.predict(x_submission)
submission_preds_list.append(submission_preds)
if submission:
submission_preds = clf.predict(x_submission)
submission_preds_list.append(submission_preds)
if fixed_holdout:
all_preds = numpy.vstack(holdout_preds_list)
avg_preds = numpy.average(all_preds, axis=0)
avg_preds[avg_preds < 0.001] = 0.001
avg_preds[avg_preds > 0.999] = 0.999
deltas = numpy.abs(avg_preds.reshape(300) - y_holdout.reshape(300))
df_train = df_train[:300]
df_train["deltas"] = deltas
# df_train.to_csv("c:/tmp/deltas.csv")
loss = log_loss(y_holdout, avg_preds)
        print("Fixed holdout avg score: ", loss)
        # print("Fixed holdout mean: ", y_holdout.mean())
if submission:
all_preds = numpy.vstack(submission_preds_list)
avg_preds = numpy.average(all_preds, axis=0)
avg_preds[avg_preds < 0.01] = 0.01
avg_preds[avg_preds > 0.99] = 0.99
submission_preds_list = avg_preds.tolist()
df_submission["id"] = df_submission["patient_id"]
df_submission["cancer"] = submission_preds_list
df_submission = df_submission[["id", "cancer"]]
if not os.path.exists("submission/"):
os.mkdir("submission/")
if not os.path.exists("submission/level1/"):
os.mkdir("submission/level1/")
df_submission.to_csv("submission/level1/s" + extension + ".csv", index=False)
# print("Submission mean chance: ", avg_preds.mean())
if submission_is_fixed_holdout:
all_preds = numpy.vstack(submission_preds_list)
avg_preds = numpy.average(all_preds, axis=0)
avg_preds[avg_preds < 0.01] = 0.01
avg_preds[avg_preds > 0.99] = 0.99
submission_preds_list = avg_preds.tolist()
loss = log_loss(submission_y, submission_preds_list)
# print("First 300 patients : ", loss)
if submission_is_fixed_holdout:
print("First 300 patients score: ", sum(scores) / len(scores), " mean chance: ", sum(train_preds_list) / len(train_preds_list))
else:
print("Average score: ", sum(scores) / len(scores), " mean chance: ", sum(train_preds_list) / len(train_preds_list))
def combine_submissions(level, model_type=None):
print("Combine submissions.. level: ", level, " model_type: ", model_type)
src_dir = "submission/level" + str(level) + "/"
dst_dir = "submission/"
if level == 1:
dst_dir += "level2/"
if not os.path.exists("submission/level2/"):
os.mkdir("submission/level2/")
submission_df = pandas.read_csv("resources/stage2_sample_submission.csv")
submission_df["id2"] = submission_df["id"]
submission_df.set_index(["id2"], inplace=True)
search_expr = "*.csv" if model_type is None else "*" + model_type + "*.csv"
csvs = glob.glob(src_dir + search_expr)
print(len(csvs), " found..")
for submission_idx, submission_path in enumerate(csvs):
print(ntpath.basename(submission_path))
column_name = "s" + str(submission_idx)
submission_df[column_name] = 0
sub_df = pandas.read_csv(submission_path)
for index, row in sub_df.iterrows():
patient_id = row["id"]
cancer = row["cancer"]
submission_df.loc[patient_id, column_name] = cancer
submission_df["cancer"] = 0
for i in range(len(csvs)):
submission_df["cancer"] += submission_df["s" + str(i)]
submission_df["cancer"] /= len(csvs)
if not os.path.exists(dst_dir + "debug/"):
os.mkdir(dst_dir + "debug/")
if level == 2:
target_path = dst_dir + "final_submission.csv"
target_path_allcols = dst_dir + "debug/final_submission.csv"
else:
target_path_allcols = dst_dir + "debug/" + "combined_submission_" + model_type + ".csv"
target_path = dst_dir + "combined_submission_" + model_type + ".csv"
submission_df.to_csv(target_path_allcols, index=False)
submission_df[["id", "cancer"]].to_csv(target_path, index=False)
if __name__ == "__main__":
if True:
for model_variant in ["_luna16_fs", "_luna_posnegndsb_v1", "_luna_posnegndsb_v2"]:
print("Variant: ", model_variant)
if True:
combine_nodule_predictions(None, train_set=True, nodule_th=0.7, extensions=[model_variant])
combine_nodule_predictions(None, train_set=False, nodule_th=0.7, extensions=[model_variant])
if True:
train_xgboost_on_combined_nodules(fixed_holdout=False, submission=True, submission_is_fixed_holdout=False, extension=model_variant)
train_xgboost_on_combined_nodules(fixed_holdout=True, extension=model_variant)
combine_submissions(level=1, model_type="luna_posnegndsb")
combine_submissions(level=1, model_type="luna16_fs")
combine_submissions(level=1, model_type="daniel")
combine_submissions(level=2)
| mit |
neteler/QGIS | python/plugins/processing/algs/qgis/RasterLayerHistogram.py | 5 | 3235 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RasterLayerHistogram.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from PyQt4.QtCore import QVariant
from qgis.core import QgsField
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterRaster
from processing.core.outputs import OutputTable
from processing.core.outputs import OutputHTML
from processing.tools import dataobjects
from processing.tools import raster
class RasterLayerHistogram(GeoAlgorithm):
INPUT = 'INPUT'
PLOT = 'PLOT'
TABLE = 'TABLE'
BINS = 'BINS'
def defineCharacteristics(self):
self.name = 'Raster layer histogram'
self.group = 'Graphics'
self.addParameter(ParameterRaster(self.INPUT,
self.tr('Input layer')))
self.addParameter(ParameterNumber(self.BINS,
self.tr('Number of bins'), 2, None, 10))
self.addOutput(OutputHTML(self.PLOT, self.tr('Output plot')))
self.addOutput(OutputTable(self.TABLE, self.tr('Output table')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
nbins = self.getParameterValue(self.BINS)
outputplot = self.getOutputValue(self.PLOT)
outputtable = self.getOutputFromName(self.TABLE)
values = raster.scanraster(layer, progress)
# ALERT: this is potentially blocking if the layer is too big
plt.close()
valueslist = []
for v in values:
if v is not None:
valueslist.append(v)
(n, bins, values) = plt.hist(valueslist, nbins)
fields = [QgsField('CENTER_VALUE', QVariant.Double),
QgsField('NUM_ELEM', QVariant.Double)]
writer = outputtable.getTableWriter(fields)
for i in xrange(len(values)):
writer.addRecord([str(bins[i]) + '-' + str(bins[i + 1]), n[i]])
plotFilename = outputplot + '.png'
lab.savefig(plotFilename)
f = open(outputplot, 'w')
f.write('<img src="' + plotFilename + '"/>')
f.close()
| gpl-2.0 |
xzh86/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 35 | 15016 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
                                  err_msg="Clusters should not be centered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
sumeetkr/sumeetkr.github.io | markdown_generator/publications.py | 197 | 3887 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, url_slug, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
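#
# For illustration only, here is a hypothetical header plus one made-up row
# showing the columns this script reads (tab-separated in the real file):
#
#   pub_date    title          venue          excerpt        citation                          url_slug       paper_url
#   2013-01-01  Example Paper  Example Venue  A short blurb  Doe, J. (2013). "Example Paper."  example-paper  http://example.com/example.pdf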
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
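# For example (hypothetical input), html_escape('Q&A on "copulas"') would
# return 'Q&amp;A on &quot;copulas&quot;'.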
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation"), just remove or comment out the corresponding line in the loop below.
# In[5]:
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
if len(str(item.paper_url)) > 5:
md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.paper_url)) > 5:
md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n"
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
md += "\nRecommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
| apache-2.0 |
scorpionis/docklet | src/dscheduler_test.py | 1 | 26915 | # coding=UTF-8
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
from scipy.stats import norm
from scipy.stats import binom
from scipy.stats import expon
from scipy.stats import genexpon
import math
import random
import numpy as np
from mdkp import Colony
from dmachine_test import AllocationOfMachine
import heapq
from dconnection import *
#import dconnection
import time
import _thread
import logging
import json
import jsonpickle
import os
import threading
import matplotlib.pyplot as plt
from log import slogger
#import log
machine_queue = []
queue_lock = threading.Lock()
# only used for test
task_requests = {}
tasks = {}
machines = {}
restricted_index = 0
node_manager = None
etcdclient = None
recv_stop = False
cov_0 = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
cov_0_0_n1 = [[1, -1, 0], [-1, 1, 0], [0, 0, 1]]
cov_0_0_1 = [[1, 1, 0], [-1, 1, 0], [0, 0, 1]]
cov_1_1_0= [[1, -1, 1], [-1, 1, 1], [1, 1, 1]]
cov_1_1_1 = [[1, 0.9, 0.9], [0.9, 1, 0.9], [0.9, 0.9, 1]]
cov05 = [[1, -0.5, 0.5, -0.5], [-0.5, 1, -0.5, 0.5], [0.5, -0.5, 1, -0.5], [-0.5, 0.5, -0.5, 1]]
cov11 = [[1, -1, 1, -1], [-1, 1, -1, 1], [1, -1, 1, -1], [-1, 1, -1, 1]]
cov00 = [[1, 0, 0.5, -0.5], [0, 1, 0, 0.5], [0.5, 0, 1, 0], [0, 0.5, 0, 1]]
def generate_multivariate_uniform_optimal(cpu,mem,num_tasks):
mean = [0, 0, 0, 0]
cov = cov00
a,b,c,d = np.random.multivariate_normal(mean, cov, num_tasks).T
# for i,ia in enumerate(a):
# print(a[i],b[i],c[i],d[i],'\n')
cpus = []
mems = []
values = []
for ix in a:
cpus.append(norm.cdf(ix)*(cpu/4-1)+1)
for iy in b:
mems.append(norm.cdf(iy)*(mem/4-1)+1)
for index, iz in enumerate(c):
if cpus[index]> mems[index]:
values.append(norm.cdf(iz)*(100-1)+1)
else:
values.append(norm.cdf(d[index])*(100-1)+1)
# for i,icpus in enumerate(cpus):
# print(cpus[i],mems[i],values[i],'\n')
return cpus,mems,values
def generate_multivariate_uniform(cpu,mem,num_tasks):
mean = [0, 0, 0]
# cov = [[1, -1, 0], [-1, 1, 0], [0, 0, 1]]
cov = cov_0
x, y, z = np.random.multivariate_normal(mean, cov, num_tasks).T
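    # Gaussian-copula style sampling: the correlated normal draws are pushed
    # through norm.cdf to obtain correlated uniforms, which are then scaled to
    # [1, cpu/4] for cpus, [1, mem/4] for mems and [1, 100] for bid values.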
cpus = []
mems = []
values = []
for ix in x:
cpus.append(norm.cdf(ix)*(cpu/4-1)+1)
for iy in y:
mems.append(norm.cdf(iy)*(mem/4-1)+1)
for iz in z:
values.append(norm.cdf(iz)*(100-1)+1)
return cpus,mems,values
def generate_multivariate_binomial(cpu,mem,num_tasks):
mean = [0, 0, 0]
cov = [[1, -0.5, -0.5], [-0.5, 1, -0.5], [-0.5, -0.5, 1]]
x, y, z = np.random.multivariate_normal(mean, cov, num_tasks).T
cpus = []
mems = []
values = []
for ix in x:
cpus.append(binom.ppf(norm.cdf(ix),cpu,8/cpu))
for iy in y:
mems.append(binom.ppf(norm.cdf(iy),mem,8/mem))
for iz in z:
values.append(norm.cdf(iz)*(100-1)+1)
# print("cpu mem corr: ", np.corrcoef(cpus,mems)[0, 1])
# print("cpus: ",cpus)
return cpus,mems,values
def generate_multivariate_ec2(cpu,mem,num_tasks):
mean = [0, 0, 0]
cov = [[1, -1, 1], [-1, 1, 1], [1, 1, 1]]
x, y, z = np.random.multivariate_normal(mean, cov, num_tasks).T
cpus = []
mems = []
values = []
for ix in x:
# cpus.append(int(8-round(expon.ppf(norm.cdf(ix),0,0.25))))
cpus.append(norm.cdf(ix)*3+5)
for iy in y:
# mems.append(int(15-round(expon.ppf(norm.cdf(iy),0,0.25))))
mems.append(norm.cdf(iy)*14+1)
for iz in z:
values.append(norm.cdf(iz)*(100-1)+1)
# print("cpu value corr: ", np.corrcoef(cpus,values)[0, 1])
# print("cpus: ",cpus)
# print("mems: ",mems)
# print("values:",values)
return cpus,mems,values
def generate_test_data(cpu,mem,machines,request_type,distribution,id_base):
task_requests = {}
num_tasks = 0
if distribution == 'binomial':
num_tasks = int(32 * machines)
# cpu_arr = np.random.binomial(cpu, 4/cpu, num_tasks)
# mem_arr = np.random.binomial(mem, 1/256, num_tasks)
cpu_arr, mem_arr,bids = generate_multivariate_binomial(cpu,mem,num_tasks)
elif distribution == 'uniform':
num_tasks = int(32 * machines)
# cpu_arr = np.random.uniform(1,cpu,cpu*machines)
# mem_arr = np.random.uniform(1,mem,cpu*machines)
cpu_arr, mem_arr,bids = generate_multivariate_uniform(cpu,mem,num_tasks)
elif distribution == 'ec2':
num_tasks = int(cpu/4 * machines)
# cpu_arr = np.random.uniform(1,cpu,cpu*machines)
# mem_arr = np.random.uniform(1,mem,cpu*machines)
cpu_arr, mem_arr,bids = generate_multivariate_ec2(cpu,mem,num_tasks)
elif distribution == 'ca':
num_tasks = int(32 * machines)
cpu_arr,mem_arr,bids = generate_multivariate_uniform(cpu,mem,num_tasks)
elif distribution == 'ca-optimal':
num_tasks = int(32 * machines)
cpu_arr,mem_arr,bids = generate_multivariate_uniform_optimal(cpu,mem,num_tasks)
for i in range(0+id_base,int(num_tasks)):
if cpu_arr[i]==0 or mem_arr[i] ==0:
continue
if request_type == 'reliable':
task = {
'id': str(i),
'cpus': str(int(math.floor(cpu_arr[i]))),
'mems': str(int(math.floor(mem_arr[i]))),
'bid': str(int(bids[i]))
# 'bid': str(max(int(np.random.normal(cpu_arr[i]+mem_arr[i], 10, 1)[0]),0))
}
else:
task = {
'id': str(i),
'cpus': str(int(math.floor(cpu_arr[i]))),
'mems': str(int(math.floor(mem_arr[i]))),
'bid': 0
}
key = str(i)
task_requests[key] = task
# write to a file
with open("/home/augustin/docklet/test_data/"+distribution+'_tasks'+str(machines)+'.txt','w') as f:
for key, task in task_requests.items():
f.write(str(task['cpus'])+' '+str(task['mems'])+' '+str(task['bid'])+'\n')
f.flush()
os.fsync(f)
return task_requests
def parse_test_data(filename,cpus,mems,machines,request_type):
num_tasks =0
if request_type=="uniform":
num_tasks = cpus * machines
else:
num_tasks = cpus/2*machines
task_requests = {}
with open(filename,'r') as f:
i =0
for line in f.readlines()[0:int(num_tasks)]:
arr = line.split()
task = {
'id': str(i),
'cpus': arr[0],
'mems': arr[1],
'bid': arr[2]
}
key = str(i)
task_requests[key] = task
i+=1
# print(task)
return task_requests
def add_machine(id, cpus=24, mems=240000):
global machines
global machine_queue
machine = AllocationOfMachine(id, cpus, mems)
machines[id] = machine
heapq.heappush(machine_queue,machine)
# to-do:改成多线程,直接运行每个线程
# machine.colony.run()
send_colony("create",machine.machineid, str(machine.reliable_cpus), str(machine.reliable_mems))
sync_colony()
return machine
def pre_allocate(task):
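    # Rough summary: a bidding ("reliable") task is tentatively assigned to the
    # machine at the head of the priority queue, whose placement heuristic is
    # updated before it is pushed back; zero-bid ("restricted") tasks are spread
    # round-robin across all machines via restricted_index.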
global restricted_index
global queue_lock
if 'bid' in task and task['bid']!='0':
queue_lock.acquire()
machine = heapq.heappop(machine_queue)
task['machineid'] = machine.machineid
task['allocation_type'] = 'none'
task['allocation_cpus'] = str(int(task['cpus'])*1000)
task['allocation_mems'] = task['mems']
task['allocation_mems_sw'] = str( 2 * int(task['mems']) )
task['allocation_mems_soft'] = str( 2 * int(task['mems']) )
tasks[task['id']] = task
machine.pre_cpus_wanted += int(task['cpus'])
machine.pre_mems_wanted += int(task['mems'])
if(machine.pre_cpus_wanted <= machine.reliable_cpus and machine.pre_mems_wanted <= machine.reliable_mems):
machine.placement_heu +=int(task['bid'])
else:
if machine.mem_value == 0:
machine.mem_value = machine.placement_heu/(machine.rareness_ratio * machine.reliable_cpus + machine.reliable_mems)
machine.cpu_value = machine.mem_value * machine.rareness_ratio
heu_incre = int(task['bid']) - int(task['cpus'])* machine.cpu_value - int(task['mems'])*machine.mem_value
if heu_incre > 0:
machine.placement_heu += heu_incre
heapq.heappush(machine_queue,machine)
# time.sleep(0.1)
queue_lock.release()
else:
if(restricted_index >= len(machines)):
restricted_index = 0
slogger.debug("restricted_index: ", restricted_index)
values = list(machines.values())
task['machineid'] = values[restricted_index].machineid
restricted_index += 1
task['allocation_type'] = 'none'
task['allocation_cpus'] = str(int(task['cpus'])*1000)
task['allocation_mems'] = task['mems']
task['allocation_mems_sw'] = str( 2 * int(task['mems']) )
        task['allocation_mems_soft'] = str( 2 * int(task['mems']) )
tasks[task['id']] = task
return task
def allocate(id):
task = tasks[id]
machineid = task['machineid']
machine = machines[machineid]
if 'bid' in task and task['bid']!='0':
# slogger.debug("dispatch reliable")
task = machine.add_reliable_task(task)
# slogger.debug("pop machine: id = %s", machine.machineid)
send_task(machineid,task,"add")
else:
# slogger.debug("dispatch restricted")
task = machine.add_restricted_task(task)
return task
def release(id):
task = tasks[id]
machineid = tasks[id]['machineid']
machine = machines[machineid]
if 'bid' in task and task['bid']!='0':
slogger.debug("release reliable")
machine.release_reliable_task(id)
send_task(machine,task,'delete')
else:
slogger.debug("release restricted")
machine.release_restricted_task(id)
def after_release(id):
task = tasks[id]
for index,machine in enumerate(machine_queue):
if task['machineid'] == machine.machineid:
del machine_queue[index]
break
machine.total_value -= int(task['bid'])
heapq.heappush(machine_queue,machine)
del tasks[id]
def stop_scheduler():
global queue_lock
print("stop scheduler")
queue_lock.acquire()
os.system("kill -9 $(pgrep acommdkp)")
time.sleep(1)
print("close sockets")
close_sync_socket()
close_colony_socket()
close_task_socket()
import dconnection
dconnection.recv_run = False
queue_lock.release()
time.sleep(1)
def init_scheduler():
global queue_lock
    # start the C program and run it in the background
os.system("rm -rf /home/augustin/docklet/src/aco-mmdkp.log")
os.system("/home/augustin/docklet/src/aco-mmdkp/acommdkp >/home/augustin/docklet/src/aco-mmdkp.log 2>&1 &")
time.sleep(1)
slogger.setLevel(logging.INFO)
slogger.info("init scheduler!")
print("init scheduler")
init_sync_socket()
init_colony_socket()
init_task_socket()
init_result_socket()
_thread.start_new_thread(recv_result,(machines,machine_queue,queue_lock))
def test_all():
init_scheduler()
for i in range(0,2):
add_machine("m"+str(i),64,256)
slogger.info("add colonies done!")
# requests = generate_test_data(64,256,2,"reliable",'uniform',0)
# generate_test_data(64,256,1,"restricted",192)
requests = parse_test_data('uniform_tasks1.txt',64,256,1,"uniform")
for index,request in requests.items():
pre_allocate(request)
slogger.info("pre allocate tasks done")
for index,request in requests.items():
allocate(request['id'])
slogger.info("allocate tasks done")
time.sleep(10)
for index,request in requests.items():
release(request['id'])
slogger.info("release tasks done")
for index,request in requests.items():
after_release(request['id'])
slogger.info("after release tasks done")
def relax_mdp(tasks,cpus,mems,machines):
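    # Upper bound by relaxation: pool the CPU and memory capacity of all
    # machines into one bin and solve the resulting two-dimensional 0/1
    # knapsack over the submitted tasks with dynamic programming.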
cpus = cpus*machines
mems = mems * machines
opt = np.zeros((cpus+1,mems+1))
for key,task in tasks.items():
i_cpu = int(task['cpus'])
i_mem = int(task['mems'])
bid = int(task['bid'])
for j in range(cpus,i_cpu-1,-1):
for k in range(mems,i_mem-1, -1):
# print(j,k)
opt[j][k] = max(opt[j][k],opt[j-i_cpu][k-i_mem]+bid)
# print(opt)
print("relax opt: ",opt[cpus][mems])
return opt[cpus][mems]
def test_quality(num_machines,request_type):
os.system("kill -9 $(pgrep acommdkp)")
init_scheduler()
for i in range(0,num_machines):
add_machine("m"+str(i),64,256)
# Time.sleep(1)
slogger.info("add colonies done!")
# requests = generate_test_data(64,256,2,"reliable",'uniform',0)
# generate_test_data(64,256,1,"restricted",192)
requests = parse_test_data("/home/augustin/docklet/test_data/"+request_type+'_tasks'+str(num_machines)+'.txt',64,256,num_machines,request_type)
i = 0
j=0
for index,request in requests.items():
pre_allocate(request)
allocate(request['id'])
if i == len(requests.items())/num_machines/2:
time.sleep(1)
print("part ",j, " done")
i =0
j+=1
i+=1
slogger.info("pre allocate tasks done")
slogger.info("allocate tasks done")
time.sleep(10)
# generate result quality
total_social_welfare = 0
for i in range(0,num_machines):
total_social_welfare += machines['m'+str(i)].social_welfare
stop_scheduler()
print("MDRPSPA social_welfare: ",total_social_welfare);
return total_social_welfare
# upper = relax_mdp(requests,64,256,num_machines)
# print("upper bound: ", upper)
def test_generate_test_data(num,request_type):
for i in range(1,num+1):
print(i)
generate_test_data(64,256,i,"reliable",request_type,0)
def test_compare_ec2(num_machines, request_type):
os.system("kill -9 $(pgrep acommdkp)")
time.sleep(1)
init_scheduler()
for i in range(0,num_machines):
add_machine("m"+str(i),256,480)
slogger.info("add colonies done!")
time.sleep(1)
requests = parse_test_data("/home/augustin/docklet/test_data/"+request_type+'_tasks'+str(num_machines)+'.txt',256,480,num_machines,request_type)
i = 0
j=0
for index,request in requests.items():
pre_allocate(request)
allocate(request['id'])
if i == len(requests.items())/num_machines*2:
time.sleep(0.5)
print("part ",j, " done")
i =0
j+=1
i+=1
slogger.info("pre allocate tasks done")
slogger.info("allocate tasks done")
time.sleep(10)
# generate result quality
total_social_welfare = 0
for i in range(0,num_machines):
print('m'+str(i)+": social_welfare", machines['m'+str(i)].social_welfare)
print('m'+str(i)+": heu", machines['m'+str(i)].placement_heu)
total_social_welfare += machines['m'+str(i)].social_welfare
print("MDRPSPA social_welfare: ",total_social_welfare);
ec2_social_welfare = 0
newlist = sorted(list(requests.values()), key=lambda k: k['bid'],reverse=True)
for i in range(0,32*num_machines):
ec2_social_welfare += int(newlist[i]['bid'])
print("ec2 social_welfare: ",ec2_social_welfare)
# upper = relax_mdp(requests,256,480,num_machines)
# print("upper bound: ", upper)
stop_scheduler()
return total_social_welfare, ec2_social_welfare
def test_compare_ca(num_machines, request_type):
os.system("kill -9 $(pgrep acommdkp)")
time.sleep(3)
init_scheduler()
for i in range(0,num_machines):
add_machine("m"+str(i),128,256)
slogger.info("add colonies done!")
time.sleep(1)
requests = parse_test_data("/home/augustin/docklet/test_data/"+request_type+'_tasks'+str(num_machines)+'.txt',128,256,num_machines,request_type)
i = 0
j=0
for index,request in requests.items():
pre_allocate(request)
allocate(request['id'])
if i == len(requests.items())/num_machines*2:
time.sleep(0.5)
print("part ",j, " done")
i =0
j+=1
i+=1
slogger.info("pre allocate tasks done")
slogger.info("allocate tasks done")
time.sleep(10)
# generate result quality
total_social_welfare = 0
for i in range(0,num_machines):
# print('m'+str(i)+": social_welfare", machines['m'+str(i)].social_welfare)
# print('m'+str(i)+": heu", machines['m'+str(i)].placement_heu)
total_social_welfare += machines['m'+str(i)].social_welfare
print("MDRA social_welfare: ",total_social_welfare);
# calculate ca-provision social welfare
ca_social_welfare = 0
vmbids = []
for index,request in requests.items():
vmbid = {}
num_vm = max(int(request['cpus']), math.ceil(float(request['mems'])/2))
vmbid['vms'] = num_vm
vmbid['bid'] = float(request['bid'])
vmbid['sort'] = float(request['bid'])/num_vm
vmbids.append(vmbid)
newlist = sorted(vmbids, key=lambda k: k['sort'],reverse=True)
total_capacity = 128 * num_machines
utilized = 0
for vmbid in newlist:
# print("ca bid: ",vmbid['vms']," ",vmbid['bid'])
utilized += vmbid['vms']
if utilized <= total_capacity:
ca_social_welfare += vmbid['bid']
else:
break
print("ca social_welfare: ",ca_social_welfare)
# upper = relax_mdp(requests,256,480,num_machines)
# print("upper bound: ", upper)
stop_scheduler()
return total_social_welfare, ca_social_welfare
def test_compare_ca_stable(num_machines, request_type):
times = int(100/num_machines)
a = 0
b = 0
for i in range(times):
print("machines: ", num_machines," times: ", i)
generate_test_data(128,256,num_machines,"reliable",'ca',0)
ia,ib = test_compare_ca(num_machines,request_type)
a +=ia
b +=ib
a = a/times
b = b/times
print("final result: ", a, b, a/b)
return a,b,a/b
def generate_test11_result(num):
sw1 = []
sw2 = []
for i in range(1,num):
generate_test_data(256,480,i,"reliable",'ec2',0)
i_sw1,i_sw2 = test_compare_ec2(i,'ec2')
sw1.append(i_sw1)
sw2.append(i_sw2)
plt.plot(range(1,num),sw1,color='red')
plt.plot(range(1,num),sw2,color='blue')
plt.xlabel('number of machines')
plt.ylabel('social welfare')
plt.title('Compare Social Welfare of MDRPSPA with EC2')
plt.legend()
plt.savefig("result1.png")
with open("/home/augustin/docklet/test_result/compare_with_ec2.txt",'w') as f:
for i in range(1,num):
f.write(str(sw1[i-1])+' '+str(sw2[i-1])+'\n')
f.flush()
os.fsync(f)
def generate_test12_result():
ratios = []
with open("/home/augustin/docklet/test_result/compare_with_ec2.txt",'r') as f:
for line in f.readlines()[0:99]:
arr = line.split()
ratio = float(arr[0])/float(arr[1])
ratios.append(ratio)
print(len(ratios))
plt.plot(np.array(range(1,100)),np.array(ratios),'k-')
plt.xlabel('number of machines')
plt.ylabel('Ratio of Social welfare of MDRPSPA to EC2')
plt.title('Ratio of Social Welfare of MDRPSPA to EC2')
plt.savefig("result12.png")
def draw_test1_result():
ratios = []
sw1 = []
sw2 = []
with open("/home/augustin/docklet/test_result/compare_with_ec2.txt",'r') as f:
for line in f.readlines()[0:99]:
arr = line.split()
ratio = float(arr[0])/float(arr[1])
ratios.append(ratio)
sw1.append(float(arr[0]))
sw2.append(float(arr[1]))
plt.figure(1)
plt.plot(range(1,100),sw1,'k-',label='MDRPSPA')
plt.plot(range(1,100),sw2,'k--',label='EC2')
plt.xlabel('number of machines')
plt.ylabel('social welfare')
plt.title('Compare Social Welfare of MDRPSPA with EC2')
plt.legend(loc ='upper left')
plt.savefig("result1_1.png")
plt.figure(2)
plt.plot(np.array(range(1,100)),np.array(ratios),'k-')
plt.xlabel('number of machines')
plt.ylabel('Ratio of Social welfare of MDRPSPA to EC2')
plt.title('Ratio of Social Welfare of MDRPSPA to EC2')
plt.savefig("result1_2.png")
def generate_test21_result():
arr = list(range(1,21))
arr.append(30)
arr.append(40)
arr.append(50)
arr.append(100)
result = {}
for i in arr:
result[i] =test_quality(i,'uniform')
# write to a file
with open("/home/augustin/docklet/test_result/quality_uniform_mdrpspa1.txt",'w') as f:
for key, task in result.items():
f.write(str(key) + ' '+ str(result[key]) + '\n')
f.flush()
os.fsync(f)
return
def draw_test2_result():
x = list(range(1,21))
x.append(30)
x.append(40)
x.append(50)
x.append(100)
sw1 = []
sw2 = []
with open('/home/augustin/docklet/test_result/quality_uniform_mdrpspa1.txt','r') as f:
for line in f.readlines()[0:24]:
arr = line.split()
sw1.append(arr[1])
with open('/home/augustin/docklet/test_result/quality_uniform_opt1.txt','r') as f:
for line in f.readlines()[0:24]:
arr = line.split()
sw2.append(arr[1])
plt.figure(1)
plt.plot(x,sw1,'k-', label='MDRPSPAA')
plt.plot(x,sw2,'k--', label='Upper Bound')
plt.xlabel('number of machines')
plt.ylabel('social welfare')
plt.title('Compare Social Welfare of MDRPSPAA with Upper Bound')
plt.legend(loc='upper left')
plt.savefig("result2_1.png")
ratios = []
for i,v in enumerate(x):
ratios.append(float(sw1[i]) / float(sw2[i]))
plt.figure(2)
plt.plot(x,ratios,'k-')
plt.xlabel('number of machines')
plt.ylabel('ratio of MDRPSPAA to Upper Bound')
plt.title('Ratio of Social Welfare of MDRPSPA to Upper Bound')
plt.savefig("result2_2.png")
with open("/home/augustin/docklet/test_result/quality_uniform1.txt",'w') as f:
for i,v in enumerate(x):
f.write(str(sw1[i-1])+' '+str(sw2[i-1])+'\n')
f.flush()
os.fsync(f)
return
def test_time_each(num_machines,request_type):
os.system("kill -9 $(pgrep acommdkp)")
init_scheduler()
for i in range(0,num_machines):
add_machine("m"+str(i),64,256)
slogger.info("add colonies done!")
requests = parse_test_data("/home/augustin/docklet/test_data/"+request_type+'_tasks'+str(num_machines)+'.txt',64,256,num_machines,request_type)
elapsed = 0
print("begin")
start = time.time()
for index,request in requests.items():
pre_allocate(request)
allocate(request['id'])
slogger.info("pre allocate tasks done")
slogger.info("allocate tasks done")
print("\n\nallocate done\n\n")
# generate result quality
old_total_social_welfare = 0
new_total_social_welfare = 0
while True:
new_total_social_welfare =0
for i in range(0,num_machines):
new_total_social_welfare += machines['m'+str(i)].social_welfare
if old_total_social_welfare == new_total_social_welfare:
elapsed = time.time()-start
print("time used:",elapsed)
break
else:
old_total_social_welfare = new_total_social_welfare
time.sleep(1)
print("MDRPSPA social_welfare: ",new_total_social_welfare);
stop_scheduler()
return elapsed
def test_time():
x = list(range(1,101))
times = []
for i in range(1,101):
used = test_time_each(i,'uniform')
times.append(used/8)
plt.plot(x,times,'k-')
plt.xlabel('number of machines')
plt.ylabel('computing time')
plt.title('Computing time of MDRPSPAA')
plt.savefig("result3_1.png")
with open("/home/augustin/docklet/test_result/time_uniform1.txt",'w') as f:
for i,v in enumerate(x):
f.write(str(v)+' '+str(times[i])+'\n')
f.flush()
os.fsync(f)
def test_time_quality(num_machines,request_type):
os.system("kill -9 $(pgrep acommdkp)")
init_scheduler()
for i in range(0,100):
add_machine("m"+str(i),64,256)
slogger.info("add colonies done!")
requests = parse_test_data("/home/augustin/docklet/test_data/"+request_type+'_tasks'+str(num_machines)+'.txt',64,256,num_machines,request_type)
elapsed = 0
print("begin")
start = time.time()
times = []
quality = []
i = 0
j=0
old_total_social_welfare = 0
for index,request in requests.items():
pre_allocate(request)
allocate(request['id'])
if i == len(requests.items())/num_machines:
used = time.time()-start
times.append(used/8)
old_total_social_welfare = 0
for i in range(0,num_machines):
old_total_social_welfare += machines['m'+str(i)].social_welfare
quality.append(old_total_social_welfare)
print("part ",j, " done")
i =0
j+=1
i+=1
while True:
time.sleep(1)
new_total_social_welfare =0
for i in range(0,num_machines):
new_total_social_welfare += machines['m'+str(i)].social_welfare
if old_total_social_welfare == new_total_social_welfare:
break
else:
used = time.time()-start
times.append(used/8)
quality.append(new_total_social_welfare)
old_total_social_welfare = new_total_social_welfare
time.sleep(0.1)
print("MDRPSPA social_welfare: ",new_total_social_welfare);
plt.plot(times,quality,'k-')
plt.xlabel('computing time')
plt.ylabel('social welfare')
plt.title('Social welfare changes with time')
plt.savefig("result3_2.png")
stop_scheduler()
return
if __name__ == '__main__':
# test_pub_socket();
# test_colony_socket();
# test_all();
# generate_multivariate_ca(128,256,100)
# generate_test_data(128,256,100,"reliable",'ca',0)
# generate_test11_result()
# generate_test12_result()
# draw_test2_result()
# draw_test1_result()
# test_time()
# test_time_quality(100,'uniform')
# for i in range(0,10):
# test_time_each(100,'uniform')
# generate_test_data(256,480,100,"reliable",'ec2',0)
# i_sw1,i_sw2 = test_compare_ec2(100,'ec2')
# generate_multivariate_uniform_optimal(128,256,512)
test_compare_ca_stable(10,'ca')
# i_sw1,i_sw2 = test_compare_ca_stable(1,'ca')
# for i in range(1,101):
# print(i)
# test_generate_test_data(100,'uniform')
# generate_test_data(64,256,20,"reliable",'uniform',0)
# test_quality(20,'uniform')
# generate_test_data(64,256,i,"reliable",'binomial',0)
| bsd-3-clause |
pmaunz/pyqtgraph | pyqtgraph/exporters/Matplotlib.py | 39 | 4821 | from ..Qt import QtGui, QtCore
from .Exporter import Exporter
from .. import PlotItem
from .. import functions as fn
__all__ = ['MatplotlibExporter']
"""
It is helpful when using the matplotlib Exporter if your
.matplotlib/matplotlibrc file is configured appropriately.
The following are suggested for getting usable PDF output that
can be edited in Illustrator, etc.
backend : Qt4Agg
text.usetex : True # Assumes you have a findable LaTeX installation
interactive : False
font.family : sans-serif
font.sans-serif : 'Arial' # (make first in list)
mathtext.default : sf
figure.facecolor : white # personal preference
# next setting allows pdf font to be readable in Adobe Illustrator
pdf.fonttype : 42 # set fonts to TrueType (otherwise it will be 3
# and the text will be vectorized.
text.dvipnghack : True # primarily to clean up font appearance on Mac
The advantage is that there is less to do to get an exported file cleaned and ready for
publication. Fonts are not vectorized (outlined), and window colors are white.
"""
class MatplotlibExporter(Exporter):
Name = "Matplotlib Window"
windows = []
def __init__(self, item):
Exporter.__init__(self, item)
def parameters(self):
return None
def cleanAxes(self, axl):
if type(axl) is not list:
axl = [axl]
for ax in axl:
if ax is None:
continue
for loc, spine in ax.spines.iteritems():
if loc in ['left', 'bottom']:
pass
elif loc in ['right', 'top']:
spine.set_color('none')
# do not draw the spine
else:
raise ValueError('Unknown spine location: %s' % loc)
# turn off ticks when there is no spine
ax.xaxis.set_ticks_position('bottom')
def export(self, fileName=None):
if isinstance(self.item, PlotItem):
mpw = MatplotlibWindow()
MatplotlibExporter.windows.append(mpw)
stdFont = 'Arial'
fig = mpw.getFigure()
# get labels from the graphic item
xlabel = self.item.axes['bottom']['item'].label.toPlainText()
ylabel = self.item.axes['left']['item'].label.toPlainText()
title = self.item.titleLabel.text
ax = fig.add_subplot(111, title=title)
ax.clear()
self.cleanAxes(ax)
#ax.grid(True)
for item in self.item.curves:
x, y = item.getData()
opts = item.opts
pen = fn.mkPen(opts['pen'])
if pen.style() == QtCore.Qt.NoPen:
linestyle = ''
else:
linestyle = '-'
color = tuple([c/255. for c in fn.colorTuple(pen.color())])
symbol = opts['symbol']
if symbol == 't':
symbol = '^'
symbolPen = fn.mkPen(opts['symbolPen'])
symbolBrush = fn.mkBrush(opts['symbolBrush'])
markeredgecolor = tuple([c/255. for c in fn.colorTuple(symbolPen.color())])
markerfacecolor = tuple([c/255. for c in fn.colorTuple(symbolBrush.color())])
markersize = opts['symbolSize']
if opts['fillLevel'] is not None and opts['fillBrush'] is not None:
fillBrush = fn.mkBrush(opts['fillBrush'])
fillcolor = tuple([c/255. for c in fn.colorTuple(fillBrush.color())])
ax.fill_between(x=x, y1=y, y2=opts['fillLevel'], facecolor=fillcolor)
pl = ax.plot(x, y, marker=symbol, color=color, linewidth=pen.width(),
linestyle=linestyle, markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor,
markersize=markersize)
xr, yr = self.item.viewRange()
ax.set_xbound(*xr)
ax.set_ybound(*yr)
ax.set_xlabel(xlabel) # place the labels.
ax.set_ylabel(ylabel)
mpw.draw()
else:
raise Exception("Matplotlib export currently only works with plot items")
MatplotlibExporter.register()
class MatplotlibWindow(QtGui.QMainWindow):
def __init__(self):
from ..widgets import MatplotlibWidget
QtGui.QMainWindow.__init__(self)
self.mpl = MatplotlibWidget.MatplotlibWidget()
self.setCentralWidget(self.mpl)
self.show()
def __getattr__(self, attr):
return getattr(self.mpl, attr)
def closeEvent(self, ev):
MatplotlibExporter.windows.remove(self)
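# Illustrative usage sketch (not part of the original module): assumes a desktop session
# where a Qt application can run; the plotted values are arbitrary.
if __name__ == '__main__':
    import pyqtgraph as pg
    win = pg.plot([0, 1, 2, 3], [1, 3, 2, 4], pen='b', symbol='o')
    MatplotlibExporter(win.getPlotItem()).export()   # mirrors the plot in a matplotlib window
    QtGui.QApplication.instance().exec_()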
| mit |
pratapvardhan/pandas | pandas/tests/indexes/period/test_formats.py | 4 | 7124 | from pandas import PeriodIndex
import numpy as np
import pytest
import pandas.util.testing as tm
import pandas as pd
def test_to_native_types():
index = PeriodIndex(['2017-01-01', '2017-01-02',
'2017-01-03'], freq='D')
# First, with no arguments.
expected = np.array(['2017-01-01', '2017-01-02',
'2017-01-03'], dtype='=U10')
result = index.to_native_types()
tm.assert_numpy_array_equal(result, expected)
# No NaN values, so na_rep has no effect
result = index.to_native_types(na_rep='pandas')
tm.assert_numpy_array_equal(result, expected)
# Make sure slicing works
expected = np.array(['2017-01-01', '2017-01-03'], dtype='=U10')
result = index.to_native_types([0, 2])
tm.assert_numpy_array_equal(result, expected)
# Make sure date formatting works
expected = np.array(['01-2017-01', '01-2017-02',
'01-2017-03'], dtype='=U10')
result = index.to_native_types(date_format='%m-%Y-%d')
tm.assert_numpy_array_equal(result, expected)
# NULL object handling should work
index = PeriodIndex(['2017-01-01', pd.NaT, '2017-01-03'], freq='D')
expected = np.array(['2017-01-01', 'NaT', '2017-01-03'], dtype=object)
result = index.to_native_types()
tm.assert_numpy_array_equal(result, expected)
expected = np.array(['2017-01-01', 'pandas',
'2017-01-03'], dtype=object)
result = index.to_native_types(na_rep='pandas')
tm.assert_numpy_array_equal(result, expected)
class TestPeriodIndexRendering(object):
@pytest.mark.parametrize('method', ['__repr__', '__unicode__', '__str__'])
def test_representation(self, method):
# GH#7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'],
freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
result = getattr(idx, method)()
assert result == expected
def test_representation_to_series(self):
# GH#10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'],
freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
assert result == expected
def test_summary(self):
# GH#9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'],
freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx._summary()
assert result == expected
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/core/reshape/tile.py | 1 | 19404 | """
Quantilization functions and related stuff
"""
from functools import partial
import numpy as np
from pandas._libs.lib import infer_dtype
from pandas.core.dtypes.common import (
_NS_DTYPE, ensure_int64, is_categorical_dtype, is_datetime64_dtype,
is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_integer,
is_scalar, is_timedelta64_dtype)
from pandas.core.dtypes.missing import isna
from pandas import (
Categorical, Index, Interval, IntervalIndex, Series, Timedelta, Timestamp,
to_datetime, to_timedelta)
import pandas.core.algorithms as algos
import pandas.core.nanops as nanops
def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
include_lowest=False, duplicates='raise'):
"""
Bin values into discrete intervals.
Use `cut` when you need to segment and sort data values into bins. This
function is also useful for going from a continuous variable to a
categorical variable. For example, `cut` could convert ages to groups of
age ranges. Supports binning into an equal number of bins, or a
pre-specified array of bins.
Parameters
----------
x : array-like
The input array to be binned. Must be 1-dimensional.
bins : int, sequence of scalars, or pandas.IntervalIndex
The criteria to bin by.
* int : Defines the number of equal-width bins in the range of `x`. The
range of `x` is extended by .1% on each side to include the minimum
and maximum values of `x`.
* sequence of scalars : Defines the bin edges allowing for non-uniform
width. No extension of the range of `x` is done.
* IntervalIndex : Defines the exact bins to be used. Note that
IntervalIndex for `bins` must be non-overlapping.
right : bool, default True
Indicates whether `bins` includes the rightmost edge or not. If
``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]``
indicate (1,2], (2,3], (3,4]. This argument is ignored when
`bins` is an IntervalIndex.
labels : array or bool, optional
Specifies the labels for the returned bins. Must be the same length as
the resulting bins. If False, returns only integer indicators of the
bins. This affects the type of the output container (see below).
This argument is ignored when `bins` is an IntervalIndex.
retbins : bool, default False
Whether to return the bins or not. Useful when bins is provided
as a scalar.
precision : int, default 3
The precision at which to store and display the bins labels.
include_lowest : bool, default False
Whether the first interval should be left-inclusive or not.
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.23.0
Returns
-------
out : pandas.Categorical, Series, or ndarray
An array-like object representing the respective bin for each value
of `x`. The type depends on the value of `labels`.
* True (default) : returns a Series for Series `x` or a
pandas.Categorical for all other inputs. The values stored within
are Interval dtype.
* sequence of scalars : returns a Series for Series `x` or a
pandas.Categorical for all other inputs. The values stored within
are whatever the type in the sequence is.
* False : returns an ndarray of integers.
bins : numpy.ndarray or IntervalIndex.
The computed or specified bins. Only returned when `retbins=True`.
For scalar or sequence `bins`, this is an ndarray with the computed
        bins. If set `duplicates=drop`, `bins` will drop non-unique bins. For
an IntervalIndex `bins`, this is equal to `bins`.
See Also
--------
qcut : Discretize variable into equal-sized buckets based on rank
or based on sample quantiles.
pandas.Categorical : Array type for storing data that come from a
fixed set of values.
Series : One-dimensional array with axis labels (including time series).
pandas.IntervalIndex : Immutable Index implementing an ordered,
sliceable set.
Notes
-----
Any NA values will be NA in the result. Out of bounds values will be NA in
the resulting Series or pandas.Categorical object.
Examples
--------
Discretize into three equal-sized bins.
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3)
... # doctest: +ELLIPSIS
[(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True)
... # doctest: +ELLIPSIS
([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...
array([0.994, 3. , 5. , 7. ]))
    Discovers the same bins, but assigns them specific labels. Notice that
    the returned Categorical's categories are `labels` and that it is ordered.
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]),
... 3, labels=["bad", "medium", "good"])
[bad, good, medium, medium, good, bad]
Categories (3, object): [bad < medium < good]
``labels=False`` implies you just want the bins back.
>>> pd.cut([0, 1, 1, 2], bins=4, labels=False)
array([0, 1, 1, 3])
Passing a Series as an input returns a Series with categorical dtype:
>>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
... index=['a', 'b', 'c', 'd', 'e'])
>>> pd.cut(s, 3)
... # doctest: +ELLIPSIS
a (1.992, 4.667]
b (1.992, 4.667]
c (4.667, 7.333]
d (7.333, 10.0]
e (7.333, 10.0]
dtype: category
Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ...
Passing a Series as an input returns a Series with mapping value.
It is used to map numerically to intervals based on bins.
>>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
... index=['a', 'b', 'c', 'd', 'e'])
>>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False)
... # doctest: +ELLIPSIS
(a 0.0
b 1.0
c 2.0
d 3.0
e 4.0
dtype: float64, array([0, 2, 4, 6, 8]))
    Use the `drop` option when bin edges are not unique
>>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True,
... right=False, duplicates='drop')
... # doctest: +ELLIPSIS
(a 0.0
b 1.0
c 2.0
d 3.0
e 3.0
dtype: float64, array([0, 2, 4, 6, 8]))
Passing an IntervalIndex for `bins` results in those categories exactly.
Notice that values not covered by the IntervalIndex are set to NaN. 0
is to the left of the first bin (which is closed on the right), and 1.5
falls between two bins.
>>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
>>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins)
[NaN, (0, 1], NaN, (2, 3], (4, 5]]
Categories (3, interval[int64]): [(0, 1] < (2, 3] < (4, 5]]
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
# for handling the cut for datetime and timedelta objects
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
try: # for array-like
sz = x.size
except AttributeError:
x = np.asarray(x)
sz = x.size
if sz == 0:
raise ValueError('Cannot cut empty array')
rng = (nanops.nanmin(x), nanops.nanmax(x))
mn, mx = [mi + 0.0 for mi in rng]
if np.isinf(mn) or np.isinf(mx):
# GH 24314
raise ValueError('cannot specify integer `bins` when input data '
'contains infinity')
elif mn == mx: # adjust end points before binning
mn -= .001 * abs(mn) if mn != 0 else .001
mx += .001 * abs(mx) if mx != 0 else .001
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
bins[-1] += adj
elif isinstance(bins, IntervalIndex):
if bins.is_overlapping:
raise ValueError('Overlapping IntervalIndex is not accepted.')
else:
if is_datetime64tz_dtype(bins):
bins = np.asarray(bins, dtype=_NS_DTYPE)
else:
bins = np.asarray(bins)
bins = _convert_bin_to_numeric_type(bins, dtype)
if (np.diff(bins) < 0).any():
raise ValueError('bins must increase monotonically.')
fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels,
precision=precision,
include_lowest=include_lowest,
dtype=dtype,
duplicates=duplicates)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name, dtype)
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'):
"""
Quantile-based discretization function. Discretize variable into
equal-sized buckets based on rank or based on sample quantiles. For example
1000 values for 10 quantiles would produce a Categorical object indicating
quantile membership for each data point.
Parameters
----------
x : 1d ndarray or Series
q : integer or array of quantiles
Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the (bins, labels) or not. Can be useful if bins
is given as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.20.0
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
Out of bounds values will be NA in the resulting Categorical object
Examples
--------
>>> pd.qcut(range(5), 4)
... # doctest: +ELLIPSIS
[(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]
Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ...
>>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, medium, bad, bad]
Categories (3, object): [good < medium < bad]
>>> pd.qcut(range(5), 4, labels=False)
array([0, 0, 1, 2, 3])
"""
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if is_integer(q):
quantiles = np.linspace(0, 1, q + 1)
else:
quantiles = q
bins = algos.quantile(x, quantiles)
fac, bins = _bins_to_cuts(x, bins, labels=labels,
precision=precision, include_lowest=True,
dtype=dtype, duplicates=duplicates)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name, dtype)
def _bins_to_cuts(x, bins, right=True, labels=None,
precision=3, include_lowest=False,
dtype=None, duplicates='raise'):
if duplicates not in ['raise', 'drop']:
raise ValueError("invalid value for 'duplicates' parameter, "
"valid options are: raise, drop")
if isinstance(bins, IntervalIndex):
# we have a fast-path here
ids = bins.get_indexer(x)
result = algos.take_nd(bins, ids)
result = Categorical(result, categories=bins, ordered=True)
return result, bins
unique_bins = algos.unique(bins)
if len(unique_bins) < len(bins) and len(bins) != 2:
if duplicates == 'raise':
raise ValueError("Bin edges must be unique: {bins!r}.\nYou "
"can drop duplicate edges by setting "
"the 'duplicates' kwarg".format(bins=bins))
else:
bins = unique_bins
side = 'left' if right else 'right'
ids = ensure_int64(bins.searchsorted(x, side=side))
if include_lowest:
ids[x == bins[0]] = 1
na_mask = isna(x) | (ids == len(bins)) | (ids == 0)
has_nas = na_mask.any()
if labels is not False:
if labels is None:
labels = _format_labels(bins, precision, right=right,
include_lowest=include_lowest,
dtype=dtype)
else:
if len(labels) != len(bins) - 1:
raise ValueError('Bin labels must be one fewer than '
'the number of bin edges')
if not is_categorical_dtype(labels):
labels = Categorical(labels, categories=labels, ordered=True)
np.putmask(ids, na_mask, 0)
result = algos.take_nd(labels, ids - 1)
else:
result = ids - 1
if has_nas:
result = result.astype(np.float64)
np.putmask(result, na_mask, np.nan)
return result, bins
def _trim_zeros(x):
while len(x) > 1 and x[-1] == '0':
x = x[:-1]
if len(x) > 1 and x[-1] == '.':
x = x[:-1]
return x
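# Illustrative example (not part of pandas): _trim_zeros strips trailing zeros and a
# dangling decimal point from a number formatted as a string.
if __name__ == "__main__":
    print(_trim_zeros("1.500"))   # -> 1.5
    print(_trim_zeros("2.000"))   # -> 2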
def _coerce_to_type(x):
"""
if the passed data is of datetime/timedelta type,
this method converts it to numeric so that cut method can
handle it
"""
dtype = None
if is_datetime64tz_dtype(x):
dtype = x.dtype
elif is_datetime64_dtype(x):
x = to_datetime(x)
dtype = np.dtype('datetime64[ns]')
elif is_timedelta64_dtype(x):
x = to_timedelta(x)
dtype = np.dtype('timedelta64[ns]')
if dtype is not None:
# GH 19768: force NaT to NaN during integer conversion
x = np.where(x.notna(), x.view(np.int64), np.nan)
return x, dtype
def _convert_bin_to_numeric_type(bins, dtype):
"""
if the passed bin is of datetime/timedelta type,
this method converts it to integer
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Raises
------
ValueError if bins are not of a compat dtype to dtype
"""
bins_dtype = infer_dtype(bins, skipna=False)
if is_timedelta64_dtype(dtype):
if bins_dtype in ['timedelta', 'timedelta64']:
bins = to_timedelta(bins).view(np.int64)
else:
raise ValueError("bins must be of timedelta64 dtype")
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
if bins_dtype in ['datetime', 'datetime64']:
bins = to_datetime(bins).view(np.int64)
else:
raise ValueError("bins must be of datetime64 dtype")
return bins
def _convert_bin_to_datelike_type(bins, dtype):
"""
    Convert bins to a DatetimeIndex or TimedeltaIndex if the original dtype is
datelike
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Returns
-------
bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is
datelike
"""
if is_datetime64tz_dtype(dtype):
bins = to_datetime(bins.astype(np.int64),
utc=True).tz_convert(dtype.tz)
elif is_datetime_or_timedelta_dtype(dtype):
bins = Index(bins.astype(np.int64), dtype=dtype)
return bins
def _format_labels(bins, precision, right=True,
include_lowest=False, dtype=None):
""" based on the dtype, return our labels """
closed = 'right' if right else 'left'
if is_datetime64tz_dtype(dtype):
formatter = partial(Timestamp, tz=dtype.tz)
adjust = lambda x: x - Timedelta('1ns')
elif is_datetime64_dtype(dtype):
formatter = Timestamp
adjust = lambda x: x - Timedelta('1ns')
elif is_timedelta64_dtype(dtype):
formatter = Timedelta
adjust = lambda x: x - Timedelta('1ns')
else:
precision = _infer_precision(precision, bins)
formatter = lambda x: _round_frac(x, precision)
adjust = lambda x: x - 10 ** (-precision)
breaks = [formatter(b) for b in bins]
labels = IntervalIndex.from_breaks(breaks, closed=closed)
if right and include_lowest:
# we will adjust the left hand side by precision to
# account that we are all right closed
v = adjust(labels[0].left)
i = IntervalIndex([Interval(v, labels[0].right, closed='right')])
labels = i.append(labels[1:])
return labels
def _preprocess_for_cut(x):
"""
handles preprocessing for cut where we convert passed
input to array, strip the index information and store it
separately
"""
x_is_series = isinstance(x, Series)
series_index = None
name = None
if x_is_series:
series_index = x.index
name = x.name
# Check that the passed array is a Pandas or Numpy object
# We don't want to strip away a Pandas data-type here (e.g. datetimetz)
ndim = getattr(x, 'ndim', None)
if ndim is None:
x = np.asarray(x)
if x.ndim != 1:
raise ValueError("Input array must be 1 dimensional")
return x_is_series, series_index, name, x
def _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name, dtype):
"""
handles post processing for the cut method where
we combine the index information if the originally passed
datatype was a series
"""
if x_is_series:
fac = Series(fac, index=series_index, name=name)
if not retbins:
return fac
bins = _convert_bin_to_datelike_type(bins, dtype)
return fac, bins
def _round_frac(x, precision):
"""
Round the fractional part of the given number
"""
if not np.isfinite(x) or x == 0:
return x
else:
frac, whole = np.modf(x)
if whole == 0:
digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision
else:
digits = precision
return np.around(x, digits)
def _infer_precision(base_precision, bins):
"""Infer an appropriate precision for _round_frac
"""
for precision in range(base_precision, 20):
levels = [_round_frac(b, precision) for b in bins]
if algos.unique(levels).size == bins.size:
return precision
return base_precision # default
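# Illustrative worked example (not part of pandas): _round_frac keeps `precision`
# significant digits of the fractional part, and _infer_precision raises the base
# precision until the rounded bin edges become distinct. The inputs are arbitrary.
if __name__ == "__main__":
    print(_round_frac(0.0004567, 3))   # -> 0.000457 (leading zeros are not counted)
    print(_round_frac(12.34567, 3))    # -> 12.346
    print(_infer_precision(3, np.array([0.123451, 0.123452, 0.5])))   # -> 6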
| bsd-3-clause |
ucd-cws/arcproject-wq-processing | arcproject/scripts/wq_gain.py | 1 | 7851 | import logging
import traceback
import six
from string import digits
import os
import pandas as pd
from arcproject.waterquality import classes
from . import wqt_timestamp_match as wqt
def convert_wq_dtypes(df): # TODO check to see if the wq_from_file function can do this
"""
Converts all the strings in dataframe into numeric (pd.to_numeric introduced in 0.17 so can't use that)
:param df:
:return: dataframe with columns converted to numeric
"""
for column in list(df.columns.values):
if df[column].dtype == object:
if pd.__version__ >= "0.17": # separate ways to convert to numeric before and after version 0.17
try:
df[column] = pd.to_numeric(df[column])
except ValueError: # try coercing the numeric fields - if it an exception is raised, we should be able to continue becuase we just need to numbers to act like numbers - it's ok if text stays text
pass
else:
df[column] = df[column].convert_objects(convert_numeric=True)
return df
def profile_function_historic(*args, **kwargs):
"""
Site functions are passed to wq_df2database so that it can determine which site a record is from. Historic data
will use this function since it will parse if off the data frame as constructed in this code (which includes
a field for the filename, which has the site code). Future data will have another method and use a different site
function that will be passed to wq_df2database
Question: Why are we using *args and **kwargs for this function? Why aren't we using directly named arguments here?
We should document this if we figure it out (not inclined to change it without knowing reason though)
:param args:
:param kwargs:
:return: site object
"""
part = kwargs["part"]
filename = kwargs["filename"] # get the value of the data source field (source_field defined globally)
try:
filename = os.path.splitext(filename)[0]
part_code = filename.split("_")[int(part)].upper() # get the selected underscored part of the name
except IndexError:
raise IndexError("Filename was unable to be split based on underscore in order to parse site name -"
" be sure your filename format matches the site function used, or that you're using "
"the correct site retrieval function")
return part_code
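# Illustrative example (not part of the original module): the filename below is purely
# hypothetical, but shows how the underscore-separated part is extracted and upper-cased.
if __name__ == "__main__":
    print(profile_function_historic(filename="Arc_bk1_20140101_gn10.csv", part=1))   # -> BK1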
def gain_wq_df2database(data, field_map=classes.gain_water_quality_header_map, session=None):
"""
Given a pandas data frame of water quality records, translates those records to ORM-mapped objects in the database.
:param data: a pandas data frame of water quality records
:param field_map: a field map (dictionary) that translates keys in the data frame (as the dict keys) to the keys used
in the ORM - uses a default, but when the schema of the data files is different, a new field map will be necessary
:param session: a SQLAlchemy session to use - for tests, we often want the session passed so it can be inspected,
otherwise, we'll likely just create it. If a session is passed, this function will NOT commit new records - that
becomes the responsibility of the caller.
:return:
"""
if not session: # if no session was passed, create our own
session = classes.get_new_session()
session_created = True
else:
session_created = False
try:
records = data.iterrows()
for row in records: # iterates over all of the rows in the data frames the fast way
gain_make_record(field_map, row[1], session) # row[1] is the actual data included in the row
if session_created: # only commit if this function created the session - otherwise leave it to caller
session.commit() # saves all new objects
finally:
if session_created:
session.close()
def gain_make_record(field_map, row, session):
"""
Called for each record in the loaded and joined Pandas data frame. Given a named tuple of a row in the data frame,
translates it into a profile object
:param field_map: A field map dictionary with keys based on the data frame fields and values of the database field
:param row: a named tuple of the row in the data frame to translate into the profile object
:param session: an open SQLAlchemy database session
:return:
"""
profile = classes.VerticalProfile() # instantiates a new object
for key in row.index: # converts named_tuple to a Dict-like and gets the keys
# look up the field that is used in the ORM/database using the key from the namedtuple.
try:
class_field = field_map[key]
except KeyError:
# logging.warning("Skipping field {} with value {}. Field not found in field map.".format(key, getattr(row, key)))
continue
if class_field is None: # if it's an explicitly defined None and not nonexistent, then skip it silently
continue
try:
setattr(profile, class_field, getattr(row, key)) # for each value, it sets the object's value to match
except AttributeError:
print("Incorrect field map - original message was {}".format(traceback.format_exc()))
else: # if we don't break for a bad site code or something else, then add the object
session.add(profile) # adds the object for creation in the DB - will be committed later.
return
def profile_from_text(session, profile_abbreviation):
"""
Given a site code and an open database session, returns the site object
:param session: An open database session
:param profile_abbreviation: a text string that matches a code in the database
:return: ProfileSite object
"""
return session.query(classes.ProfileSite).filter(classes.ProfileSite.abbreviation == profile_abbreviation).one()
def main(gain_file, site=profile_function_historic, gain=profile_function_historic, site_gain_params=wqt.site_function_params):
"""
Takes a water quality vertical profile at a specific site, date, and gain setting and adds to database
:param gain_file: vertical gain profile
:param site: a unique identifier for the site (2-4 letter character string) or a function to parse the profile name
:param gain: the gain setting used when recording the water quality data ("0", "1", "10", "100") or a function to
parse the gain setting
:param site_gain_params: parameters to pass to the site function
:return:
"""
# convert raw water quality gain file into pandas dataframe using function from wqt_timestamp_match
gain_df = wqt.wq_from_file(gain_file)
# convert data types to float
num = convert_wq_dtypes(gain_df) # TODO see if this step could be done in wq_from_file()
# add source of wqp file (get's lost when the file gets averaged)
wqt.addsourcefield(gain_df, "WQ_SOURCE", gain_file)
# basename of the source gain file
base = os.path.basename(gain_file)
# try parsing the site from the filename
try:
# If it's a text code, use the text, otherwise call the function
if isinstance(site, six.string_types):
site = site.upper()
else:
site = site(filename=base, part=site_gain_params["site_part"])
# lookup vert profile from site text
session = classes.get_new_session()
profile_site_id = profile_from_text(session, site)
gain_df['Site'] = profile_site_id.id
session.close()
except ValueError:
traceback.print_exc()
# try parsing the gain setting from the filename
try:
# If gain setting the gain is provided use it, otherwise call the function
if isinstance(gain, six.integer_types) or isinstance(gain, six.string_types):
# strip all letters from gain setting ("GN10" -> 10)
digits_only = ''.join(c for c in str(gain) if c in digits)
gain_digits = int(digits_only)
gain_df['Gain'] = gain_digits
else:
gain_setting_from_name = gain(filename=base, part=site_gain_params["gain_part"])
digits_only = ''.join(c for c in str(gain_setting_from_name) if c in digits)
gain_digits = int(digits_only)
gain_df['Gain'] = gain_digits
except ValueError:
traceback.print_exc()
# add row to database table vertical_profiles
gain_wq_df2database(gain_df)
return
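# Illustrative sketch (not part of the original module): the gain parsing used by main()
# can be exercised without a database; the path in the commented call is hypothetical.
if __name__ == "__main__":
    example_gain = "GN10"
    print(int(''.join(c for c in example_gain if c in digits)))   # -> 10
    # a full run would resemble (requires the arcproject database to be configured):
    # main(r"C:\data\Arc_bk1_20140101_gn10.csv", site="BK1", gain="GN10")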
| mit |
sorgerlab/rasmodel | kras_gtp_hydrolysis.py | 6 | 1613 | from rasmodel.scenarios.default import model
import numpy as np
from matplotlib import pyplot as plt
from pysb.integrate import Solver
from pysb import *
from tbidbaxlipo.util import fitting
KRAS = model.monomers['KRAS']
GTP = model.monomers['GTP']
total_pi = 50000
for mutant in KRAS.site_states['mutant']:
Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='open', CAAX=None,
mutant=mutant) % GTP(p=1, label='n'),
Parameter('KRAS_%s_GTP_0' % mutant, 0))
plt.ion()
plt.figure()
t = np.linspace(0, 1000, 1000) # 1000 seconds
for mutant in KRAS.site_states['mutant']:
# Zero out all initial conditions
for ic in model.initial_conditions:
ic[1].value = 0
model.parameters['KRAS_%s_GTP_0' % mutant].value = total_pi
sol = Solver(model, t)
sol.run()
plt.plot(t, sol.yobs['Pi_'] / total_pi, label=mutant)
plt.ylabel('GTP hydrolyzed (%)')
plt.ylim(top=1)
plt.xlabel('Time (s)')
plt.title('Intrinsic hydrolysis')
plt.legend(loc='upper left', fontsize=11, frameon=False)
plt.figure()
for mutant in KRAS.site_states['mutant']:
# Zero out all initial conditions
for ic in model.initial_conditions:
ic[1].value = 0
model.parameters['RASA1_0'].value = 50000
model.parameters['KRAS_%s_GTP_0' % mutant].value = total_pi
sol = Solver(model, t)
sol.run()
plt.plot(t, sol.yobs['Pi_'] / total_pi, label=mutant)
plt.ylabel('GTP hydrolyzed (%)')
plt.ylim(top=1)
plt.xlabel('Time (s)')
plt.title('GAP-mediated hydrolysis')
plt.legend(loc='upper right', fontsize=11, frameon=False)
| mit |
flightgong/scikit-learn | examples/covariance/plot_covariance_estimation.py | 250 | 5070 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A close formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
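# Illustrative check (not part of the original example): ShrunkCovariance blends the
# empirical covariance with a scaled identity, (1 - s) * S + s * (trace(S) / p) * I.
# The shrinkage value below is arbitrary; this only verifies the formula numerically.
s = 0.1
mu = np.trace(emp_cov) / n_features
shrunk_manual = (1. - s) * emp_cov + s * mu * np.eye(n_features)
shrunk_fitted = ShrunkCovariance(shrinkage=s).fit(X_train).covariance_
print("convex combination matches ShrunkCovariance: %s"
      % np.allclose(shrunk_manual, shrunk_fitted))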
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/sklearn/cross_decomposition/cca_.py | 151 | 3192 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation be done on a copy. Let the default value
to True unless you don't care about side effects
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
super(CCA, self).__init__(n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| mit |
nan86150/ImageFusion | lib/python2.7/site-packages/matplotlib/tests/test_lines.py | 10 | 2982 | """
Tests specific to the lines module.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from nose.tools import assert_true
from timeit import repeat
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import cleanup, image_comparison
@cleanup
def test_invisible_Line_rendering():
"""
Github issue #1256 identified a bug in Line.draw method
Despite visibility attribute set to False, the draw method was not
returning early enough and some pre-rendering code was executed
though not necessary.
Consequence was an excessive draw time for invisible Line instances
holding a large number of points (Npts> 10**6)
"""
# Creates big x and y data:
N = 10**7
x = np.linspace(0,1,N)
y = np.random.normal(size=N)
# Create a plot figure:
fig = plt.figure()
ax = plt.subplot(111)
# Create a "big" Line instance:
l = mpl.lines.Line2D(x,y)
l.set_visible(False)
# but don't add it to the Axis instance `ax`
# [here Interactive panning and zooming is pretty responsive]
# Time the canvas drawing:
t_no_line = min(repeat(fig.canvas.draw, number=1, repeat=3))
# (gives about 25 ms)
# Add the big invisible Line:
ax.add_line(l)
# [Now interactive panning and zooming is very slow]
# Time the canvas drawing:
t_unvisible_line = min(repeat(fig.canvas.draw, number=1, repeat=3))
# gives about 290 ms for N = 10**7 pts
slowdown_factor = (t_unvisible_line/t_no_line)
slowdown_threshold = 2 # trying to avoid false positive failures
assert_true(slowdown_factor < slowdown_threshold)
@cleanup
def test_set_line_coll_dash():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
np.random.seed(0)
# Testing setting linestyles for line collections.
# This should not produce an error.
cs = ax.contour(np.random.randn(20, 30), linestyles=[(0, (3, 3))])
assert True
@cleanup
def test_line_colors():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(range(10), color='none')
ax.plot(range(10), color='r')
ax.plot(range(10), color='.3')
ax.plot(range(10), color=(1, 0, 0, 1))
ax.plot(range(10), color=(1, 0, 0))
fig.canvas.draw()
assert True
@image_comparison(baseline_images=['line_collection_dashes'], remove_text=True)
def test_set_line_coll_dash_image():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
np.random.seed(0)
cs = ax.contour(np.random.randn(20, 30), linestyles=[(0, (3, 3))])
def test_nan_is_sorted():
# Exercises issue from PR #2744 (NaN throwing warning in _is_sorted)
line = mpl.lines.Line2D([],[])
assert_true(line._is_sorted(np.array([1,2,3])))
assert_true(not line._is_sorted(np.array([1,np.nan,3])))
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
ky822/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 128 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latents vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n / 2]
Y_train = Y[:n / 2]
X_test = X[n / 2:]
Y_test = Y[n / 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
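# Illustrative addition (not part of the original example): the per-component canonical
# correlations of the training scores summarise how well the two blocks were aligned.
print("CCA canonical correlations (train): %s"
      % [np.round(np.corrcoef(X_train_r[:, k], Y_train_r[:, k])[0, 1], 2) for k in range(2)])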
| bsd-3-clause |
tonyroberts/mdf | mdf/builders/basic.py | 3 | 25184 | """
Basic commonly used builder classes
"""
import numpy as np
import pandas as pa
from ..nodes import MDFNode, MDFEvalNode
from collections import deque, defaultdict
import datetime
import operator
import csv
import matplotlib.pyplot as pp
import sys
import types
if sys.version_info[0] > 2:
basestring = str
def _get_labels(node, label=None, value=None):
"""
returns a list of lables the same length as value, if value is
a list (or of length 1 if value is not a list)
If label is supplied that will be used as the base (eg x.0...x.N)
or if it's a list it will be padded to the correct length and returned.
"""
# if there's a value return enough labels for the value, if it's a list
if value is not None:
if label is None:
label = _get_labels(node)[0]
# if value is a list return a label for each element
if isinstance(value, (tuple, list, np.ndarray, pa.core.generic.NDFrame, pa.Index)):
# if the label is a list already pad it to the right size
if isinstance(label, (tuple, list, np.ndarray, pa.core.generic.NDFrame, pa.Index)):
label = list(label)
if len(label) < len(value):
label += ["%s.%d" % (label, i) for i in xrange(len(label), len(value))]
return label[:len(value)]
# otherwise create a list using the value's index
if isinstance(value, pa.Series):
return ["%s.%s" % (label, c) for c in value.index]
return ["%s.%d" % (label, i) for i in xrange(len(value))]
# if value is not a list return a single label
if isinstance(label, (tuple, list, np.ndarray, pa.core.generic.NDFrame)):
return list(label[:1])
return [label]
# if there's no value but a label, assume the value is a scalar and return a single label
if label is not None:
if isinstance(label, (tuple, list, np.ndarray, pa.core.generic.NDFrame)):
return list(label[:1])
return [label]
# if there's no value and no node, assume the value is a scalar and return the name of the node
if isinstance(node, MDFNode):
return [node.name.split(".").pop()]
return [str(node)]
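# Illustrative examples (not part of the original module): _get_labels expands a single
# label to match the shape of the value; a plain string stands in for an MDF node here.
if __name__ == "__main__":
    print(_get_labels("price"))                                                  # -> ['price']
    print(_get_labels("price", value=[1.0, 2.0, 3.0]))                           # -> ['price.0', 'price.1', 'price.2']
    print(_get_labels("price", value=pa.Series([1.0, 2.0], index=["a", "b"])))   # -> ['price.a', 'price.b']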
def _relabel(columns, node_names, short_names, ctx_ids):
""""
return list of new column names that don't overlap
columns is a list of columns lists for each node.
"""
assert len(columns) == len(node_names) == len(short_names) == len(ctx_ids)
def _get_overlapping(columns):
col_count = {}
overlapping = set()
for cols in columns:
for col in cols:
col_count.setdefault(col, 0)
col_count[col] += 1
if col_count[col] > 1:
overlapping.add(col)
return overlapping
overlap = _get_overlapping(columns)
if not overlap:
return columns
# take a copy as this will be updated in-place
columns = [list(cols) for cols in columns]
# collect the node names and contexts for all overlapping columns
overlap_node_names = {}
for i, (cols, node_name, short_name, ctx_id) \
in enumerate(zip(columns, node_names, short_names, ctx_ids)):
for j, col in enumerate(cols):
if col in overlap:
overlap_node_names.setdefault(col, [])\
.append((i, j, node_name, short_name, ctx_id))
for col, details in overlap_node_names.iteritems():
is_, js_, node_names, short_names, ctx_ids = zip(*details)
# prefix with the node short names if they're unique
unique_short_names = np.unique(short_names)
if unique_short_names.size == len(short_names):
for i, j, node_name, short_name, ctx_id in details:
columns[i][j] = "%s.%s" % (col, short_name)
continue
# otherwise try prefixing with the full names
unique_node_names = np.unique(node_names)
if unique_node_names.size == len(node_names):
# if the short name is common replace it with the long name
if unique_short_names.size == 1 \
and col.startswith(unique_short_names[0]):
for i, j, node_name, short_name, ctx_id in details:
columns[i][j] = col.replace(short_name, node_name)
else:
for i, j, node_name, short_name, ctx_id in details:
columns[i][j] = "%s.%s" % (col, node_name)
continue
# otherwise if the contexts are unique use a context id suffix
unique_ctx_ids = np.unique(ctx_ids)
if unique_ctx_ids.size == len(ctx_ids):
for i, j, node_name, short_name, ctx_id in details:
columns[i][j] = "%s.ctx-%s" % (col, ctx_id)
continue
# If none of those are unique use a numeric suffix.
# This should be quite unlikely.
for x in xrange(len(details)):
columns[i][j] = "%s.%d" % (col, x)
return columns
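# Illustrative example (not part of the original module): when two nodes would emit the
# same column name, _relabel disambiguates using the nodes' short names.
if __name__ == "__main__":
    print(_relabel([["x"], ["x"]],
                   ["pkg.node_a", "pkg.node_b"],
                   ["node_a", "node_b"],
                   [1, 2]))   # -> [['x.node_a'], ['x.node_b']]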
def _pairs_to_node_label_lists(node_label_pairs):
results = []
for node_or_node_label_pair in node_label_pairs:
if isinstance(node_or_node_label_pair, (tuple, list)):
# it's a tuple/list (node, label)
results.append(node_or_node_label_pair)
else:
# it's just a node - use None as the label and a default
# will be selected in _get_labels
results.append((node_or_node_label_pair, None))
# return ([node,...], [label,...])
return map(list, zip(*results))
class CSVWriter(object):
"""
callable object that appends values to a csv file
For use with mdf.run
"""
def __init__(self, fh, nodes, columns=None):
"""
Writes node values to a csv file for each date.
'fh' may be a file handle, or a filename, or a node.
If fh is a node it will be evaluated for each context used
and is expected to evaluate to the filename or file handle
to write the results to.
"""
# keep track of any file handles opened by this instance so they
# can be closed.
self.fh = fh
self.open_fhs = []
# fh may be either a file handle, a filename or a node
# that evaluates to a file handle or name.
self.writers = {}
if not isinstance(fh, MDFNode):
# if fh isn't a node use the same writer for all contexts
if isinstance(fh, basestring):
fh = open(fh, "wb")
self.open_fhs.append(fh)
writer = csv.writer(fh)
self.writers = defaultdict(lambda: writer)
self.handlers = None
if isinstance(nodes, MDFNode):
nodes = [nodes]
if len(nodes) > 1 and columns is None:
self.nodes, self.columns = _pairs_to_node_label_lists(nodes)
else:
self.nodes = nodes
self.columns = list(columns or [])[:len(nodes)]
self.columns += [None] * (len(nodes) - len(self.columns))
def __del__(self):
self.close()
def close(self):
"""closes any file handles opened by this writer"""
while self.open_fhs:
fh = self.open_fhs.pop()
fh.close()
self.writers.clear()
def __call__(self, date, ctx):
# get the node values from the context
values = [ctx.get_value(node) for node in self.nodes]
# get the writer from the context, or create it if it's not been
# created already.
ctx_id = ctx.get_id()
try:
writer = self.writers[ctx_id]
except KeyError:
fh = self.fh
if isinstance(fh, MDFNode):
fh = ctx.get_value(fh)
if isinstance(fh, basestring):
fh = open(fh, "wb")
self.open_fhs.append(fh)
writer = self.writers[ctx_id] = csv.writer(fh)
# figure out how to handle them and what to write in the header
if self.handlers is None:
header = ["date"]
self.handlers = []
for node, value, column in zip(self.nodes, values, self.columns):
if isinstance(column, MDFNode):
column = ctx.get_value(column)
header.extend(_get_labels(node, column, value))
if isinstance(value, (basestring, int, float, bool, datetime.date)):
self.handlers.append(self._write_basetype)
elif isinstance(value, (list, tuple, np.ndarray, pa.Index, pa.core.generic.NDFrame)):
self.handlers.append(self._write_list)
elif isinstance(value, pa.Series):
self.handlers.append(self._write_series)
else:
raise Exception("Unhandled type %s for node %s" % (type(value), node))
# write the header
writer.writerow(header)
# format the values and write the row
row = [date]
for handler, value in zip(self.handlers, values):
handler(value, row)
writer.writerow(row)
def _write_basetype(self, value, row):
row.append(value)
def _write_list(self, value, row):
row.extend(value)
def _write_series(self, value, row):
row.extend(value)
class NodeTypeHandler(object):
"""
Base class for NodeData handling in DataFrameBuilder. Sub-classes
should override _handle(). Callers should call handle()
"""
def __init__(self, node, filter=False):
self._name = node.short_name
self._filter = node.get_filter() if filter and isinstance(node, MDFEvalNode) else None
self._index = []
self._labels = set()
self._data = dict()
def handle(self, date, ctx, value):
"""
Stashes the date and then handles the data
in the sub-class
"""
if self._filter is None \
or ctx[self._filter]:
self._index.append(date)
self._handle(date, value)
def _handle(self, date, value):
raise NotImplementedError("_handle must be implemented in the subclass")
def get_dataframe(self, dtype=object):
"""
Returns a DataFrame containing the values accumulated
for each column for a node.
"""
columns = self.get_columns()
df = pa.DataFrame(data={}, index=self._index, columns=columns, dtype=dtype)
for (d, l), value in self._data.items():
df[l][d] = value
return df
def get_columns(self):
"""
returns the columns used to construct the dataframe
in get_dataframe
"""
return sorted(self._labels)
class NodeListTypeHandler(NodeTypeHandler):
def __init__(self, node, filter=False):
super(NodeListTypeHandler, self).__init__(node, filter=filter)
def _handle(self, date, value):
# the set of labels is fixed on the first callback
# and is of the form node.name.X for int X
self._labels = self._labels or [self._name + "." + str(i) for i in range(len(value))]
assert len(self._labels) == len(value)
for l, v in zip(self._labels, value):
self._data[(date, l)] = v
class NodeDictTypeHandler(NodeTypeHandler):
def __init__(self, node, filter=False):
super(NodeDictTypeHandler, self).__init__(node, filter=filter)
def _handle(self, date, value):
# the set of labels can grow over time
# and they reflect the big union of the dict keys
self._labels = self._labels.union(map(str, value.keys()))
for k, v in value.items():
self._data[(date, str(k))] = v
class NodeSeriesTypeHandler(NodeTypeHandler):
def __init__(self, node, filter=False):
super(NodeSeriesTypeHandler, self).__init__(node, filter=filter)
def _handle(self, date, value):
# the set of labels can grow over time
# and they reflect the big union of the row labels in the
# node value Series
self._labels = self._labels.union(map(str, value.index))
for l in value.index:
self._data[(date, str(l))] = value[l]
class NodeBaseTypeHandler(NodeTypeHandler):
def __init__(self, node, filter=False):
super(NodeBaseTypeHandler, self).__init__(node, filter=filter)
self._labels.add(self._name)
def _handle(self, date, value):
self._data[(date, self._name)] = value
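# Note: the choice of handler mirrors the dispatch in DataFrameBuilder.__call__
# below: scalar/base types and dates -> NodeBaseTypeHandler, dicts ->
# NodeDictTypeHandler, pandas Series -> NodeSeriesTypeHandler, and
# list/tuple/deque/ndarray/Index/NDFrame -> NodeListTypeHandler.  Each handler
# accumulates (date, label) -> value pairs that get_dataframe() pivots into a
# DataFrame indexed by date.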
class DataFrameBuilder(object):
# version number to provide limited backwards compatibility
__version__ = 2
def __init__(self, nodes, contexts=None, dtype=object, sparse_fill_value=None, filter=False,
start_date=None):
"""
Constructs a new DataFrameBuilder.
dtype and sparse_fill_value can be supplied as hints to the
data type that will be constructed and whether or not to try
and create a sparse data frame.
If `filter` is True and the nodes are filtered then only values
where all the filters are True will be returned.
NB. the labels parameter is currently not supported
"""
self.context_handler_dict = {}
self.filter = filter
        # keep the hints passed in; get_dataframe falls back to these when no
        # explicit dtype/sparse_fill_value is supplied
        self.dtype = dtype
        self.sparse_fill_value = sparse_fill_value
self.start_date = start_date
self._finalized = False
self._cached_dataframes = {}
self._cached_columns = {}
if isinstance(nodes, MDFNode):
nodes = [nodes]
self.nodes = nodes
if contexts:
assert len(contexts) == len(nodes)
else:
contexts = []
self.contexts = contexts
self._last_ctx = None
def __call__(self, date, ctx):
# copy the version to this instance (this isn't done in the ctor as the regression
# testing works by creating the builders in the main process and then sending them
# to the remote processes - so the version is snapped when the builder is actually
# called).
self._version_ = self.__version__
self._last_ctx = ctx.get_id()
ctx_list = self.contexts or ([ctx] * len(self.nodes))
for ctx_, node in zip(ctx_list, self.nodes):
node_value = ctx_.get_value(node)
handler_dict = self.context_handler_dict.setdefault(ctx.get_id(), {})
key = (node.name, node.short_name, ctx_.get_id())
handler = handler_dict.get(key)
if not handler:
if isinstance(node_value, (basestring, int, float, bool, datetime.date)) \
or isinstance(node_value, tuple(np.typeDict.values())):
handler = NodeBaseTypeHandler(node, filter=self.filter)
elif isinstance(node_value, dict):
handler = NodeDictTypeHandler(node, filter=self.filter)
elif isinstance(node_value, pa.Series):
handler = NodeSeriesTypeHandler(node, filter=self.filter)
elif isinstance(node_value, (list, tuple, deque, np.ndarray, pa.Index, pa.core.generic.NDFrame)):
handler = NodeListTypeHandler(node, filter=self.filter)
else:
raise Exception("Unhandled type %s for node %s" % (type(node_value), node))
handler_dict[key] = handler
if (self.start_date is None) or (date >= self.start_date):
handler.handle(date, ctx_, node_value)
def clear(self):
self.context_handler_dict.clear()
self._cached_columns.clear()
self._cached_dataframes.clear()
def get_dataframe(self, ctx, dtype=None, sparse_fill_value=None):
ctx_id = ctx if isinstance(ctx, int) else ctx.get_id()
# if the builder's been finalized and there's a dataframe cached
# return that without trying to convert it to a sparse dataframe
# or changing the dtypes (dtype and sparse_fill_value are only
# hints).
try:
return self._cached_dataframes[ctx_id]
except KeyError:
pass
if dtype is None:
dtype = self.dtype
result_df = self._build_dataframe(ctx_id, dtype)
if sparse_fill_value is None:
sparse_fill_value = self.sparse_fill_value
if sparse_fill_value is not None:
# this doesn't always work depending on the actual dtype
# the dataframe ends up being
try:
result_df = result_df.to_sparse(fill_value=sparse_fill_value)
except TypeError:
pass
# try and infer types for any that are currently set to object
return result_df.convert_objects()
def _build_dataframe(self, ctx, dtype):
"""builds a dataframe from the collected data"""
ctx_id = ctx if isinstance(ctx, int) else ctx.get_id()
handler_dict = self.context_handler_dict[ctx_id]
if len(handler_dict) == 1:
# if there's only one handler simply get the dataframe from it
handler = next(iter(handler_dict.values()))
result_df = handler.get_dataframe(dtype=dtype)
else:
# otherwise do an outer join of all the handlers' dataframes
result_df = pa.DataFrame(dtype=dtype)
handler_keys, handlers = zip(*handler_dict.items())
dataframes = [h.get_dataframe(dtype=dtype) for h in handlers]
# relabel any overlapping columns
all_columns = [df.columns for df in dataframes]
node_names, short_names, ctx_ids = zip(*handler_keys)
new_columns = _relabel(all_columns, node_names, short_names, ctx_ids)
for df, cols in zip(dataframes, new_columns):
df.columns = cols
# join everything into a single dataframe
for df in dataframes:
result_df = result_df.join(df, how="outer")
result_df = result_df.reindex(columns=sorted(result_df.columns))
return result_df
def get_columns(self, node, ctx):
"""
returns the sub-set of columns in the dataframe returned
by get_dataframe that relate to a particular node
"""
ctx_id = ctx if isinstance(ctx, int) else ctx.get_id()
try:
return self._cached_columns[(ctx_id, node)]
except KeyError:
pass
handler_dict = self.context_handler_dict[ctx_id]
# the ctx is the root context passed to __call__, which may be
# different from the shifted contexts that the node was actually
# evaluated in.
# Get all the columns for this node in all sub-contexts.
columns = []
ctx_ids = []
for (node_name, short_name, sub_ctx_id), handler in handler_dict.items():
if node_name == node.name \
and short_name == node.short_name:
columns.append(handler.get_columns())
ctx_ids.append(sub_ctx_id)
# re-label in case the same node was evaluated in multiple sub-contexts
columns = _relabel(columns,
[node.name] * len(columns),
[node.short_name] * len(columns),
ctx_ids)
return reduce(operator.add, columns, [])
@property
def dataframes(self):
"""all dataframes created by this builder (one per context)"""
return [self.get_dataframe(ctx) for ctx in self.context_handler_dict.keys()]
@property
def dataframe(self):
return self.get_dataframe(self._last_ctx) if self._last_ctx is not None else None
def plot(self, show=True, **kwargs):
"""plots all collected dataframes and shows, if show=True"""
for df in self.dataframes:
df.plot(**kwargs)
legend = sorted(df.columns)
pp.legend(legend, loc='upper center', bbox_to_anchor=(0.5, -0.17), fancybox=True, shadow=True)
if show:
pp.show()
def finalize(self):
"""
Throw away intermediate structures and just retain any dataframes
and columns.
It's not possible to add more data to the builder after this has
been called.
"""
assert not self._finalized
# cache all dataframes and column sets
for ctx_id in list(self.context_handler_dict.keys()):
for node in self.nodes:
self._cached_columns[(ctx_id, node)] = self.get_columns(node, ctx_id)
self._cached_dataframes[ctx_id] = self.get_dataframe(ctx_id)
# delete the data for that context in case we're low on memory
del self.context_handler_dict[ctx_id]
# this should be empty now
assert len(self.context_handler_dict) == 0
# snap the version number if it's not already been taken (see __call__)
if not hasattr(self, "_version_"):
self._version_ = self.__version__
self._finalized = True
def combine_result(self, other, other_ctx, ctx):
"""
Adds a result from another df builder to this one.
If not already finalized this method will call finalize and
so no more data can be collected after this is called.
"""
ctx_id = ctx.get_id()
other_ctx_id = other_ctx.get_id()
# only the caches will be updated so make sure self has been
# finalized
if not self._finalized:
self.finalize()
# update self.nodes with any nodes from the other
nodes = set(self.nodes)
other_nodes = set(other.nodes)
additional_nodes = other_nodes.difference(nodes)
self.nodes += list(additional_nodes)
# copy the finalized data
for node in other.nodes:
self._cached_columns[(ctx_id, node)] = other.get_columns(node, other_ctx_id)
self._cached_dataframes[ctx_id] = other.get_dataframe(other_ctx_id)
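# Illustrative usage sketch (not part of the original module; `date_range` and
# the node names are hypothetical and the exact mdf.run signature may differ):
#
#   builder = DataFrameBuilder([node_a, node_b], filter=True)
#   mdf.run(date_range, callbacks=[builder])
#   df = builder.dataframe      # DataFrame for the last context seen
#   dfs = builder.dataframes    # one DataFrame per context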
class FinalValueCollector(object):
"""
callable object that collects the final values for a set of nodes.
For use with mdf.run
"""
def __init__(self, nodes):
if isinstance(nodes, MDFNode):
nodes = [nodes]
self.__nodes = nodes
self.__values = {}
self.__contexts = []
def __call__(self, date, ctx):
ctx_id = ctx.get_id()
self.__values[ctx_id] = [ctx.get_value(node) for node in self.__nodes]
if ctx_id not in self.__contexts:
self.__contexts.append(ctx_id)
def clear(self):
"""clears all previously collected values"""
self.__values.clear()
self.__contexts = []
def get_values(self, ctx=None):
"""returns the collected values for a context"""
if not self.__values:
return None
if ctx is None:
ctx = self.__contexts[-1]
ctx_id = ctx if isinstance(ctx, int) else ctx.get_id()
return self.__values.get(ctx_id, None)
def get_dict(self, ctx=None):
"""returns the collected values as a dict keyed by the nodes"""
values = self.get_values(ctx)
if values is None:
return None
return dict(zip(self.__nodes, values))
@property
def values(self):
"""returns the values for the last context"""
if not self.__contexts:
return None
ctx_id = self.__contexts[-1]
return self.get_values(ctx_id)
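# Illustrative usage sketch (not part of the original module; names are
# hypothetical):
#
#   collector = FinalValueCollector([node_a, node_b])
#   mdf.run(date_range, callbacks=[collector])
#   final_a, final_b = collector.values   # final values from the last context
#   by_node = collector.get_dict()        # {node: final value}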
class NodeLogger(object):
"""
callable object for use with mdf run that logs a message
each time a node value changes.
"""
def __init__(self, nodes, fh=sys.stdout):
"""
``nodes`` is the list of node values to watch
``fh`` is a file like object to write to when changes are observed
"""
self.nodes = nodes
self.fh = fh
def __call__(self, date, ctx):
# get the max length of the node names for formatting nicely
max_len = max((len(node.name) for node in self.nodes))
fmt = "%%-%ds = %%s\n" % max_len
# get the initial values in the root context and any shifted contexts
root_ctx = ctx
values = [None] * len(self.nodes)
for i, node in enumerate(self.nodes):
values[i] = ctx[node]
# log the initial values
self.fh.write("%s:\n" % ctx)
for node, value in zip(self.nodes, values):
self.fh.write(fmt % (node.name, value))
self.fh.write("\n")
while True:
prev_values = list(values)
yield
# get the new values
for i, node in enumerate(self.nodes):
if node.has_value(ctx):
values[i] = ctx[node]
if values != prev_values:
self.fh.write("%s *changed*:\n" % ctx)
for node, value, prev_value in zip(self.nodes, values, prev_values):
if value != prev_value:
self.fh.write(fmt % (node.name, value))
self.fh.write("\n")
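# Note: NodeLogger.__call__ returns a generator - it logs the initial values,
# then on each subsequent advance re-logs only the nodes whose values changed.
# Illustrative usage (hypothetical names; the exact mdf.run signature may differ):
#
#   mdf.run(date_range, callbacks=[NodeLogger([node_a, node_b])])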
| mit |
weissercn/MLTools | Dalitz_simplified/evaluation_of_optimised_classifiers/svm_legendre/svm_Legendre_evaluation_of_optimised_classifiers.py | 1 | 2648 | import numpy as np
import math
import sys
sys.path.insert(0,'../..')
import os
import classifier_eval_simplified
from sklearn import tree
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
for dim in range(1,2):
comp_file_list=[]
contrib_string0=""
contrib_string1=""
contrib_string2=""
contrib_string3=""
####################################################################
# Legendre samples operation
####################################################################
for counter in range(dim):
contrib_string0+= str(int((0+counter)%4))+"_0__"
contrib_string1+= str(int((1+counter)%4))+"_0__"
contrib_string2+= str(int((2+counter)%4))+"_0__"
contrib_string3+= str(int((3+counter)%4))+"_0__"
#for i in range(1):
#comp_file_list.append((os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_legendre_contrib0__1_0__"+contrib_string0+"contrib1__0_5__"+contrib_string1+"contrib2__2_0__"+contrib_string2+"contrib3__0_7__"+contrib_string3+"sample_{0}.txt".format(i),os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_legendre_contrib0__1_0__"+contrib_string0+"contrib1__0_0__"+contrib_string1+"contrib2__2_0__"+contrib_string2+"contrib3__0_7__"+contrib_string3+"sample_{0}.txt".format(i)))
#comp_file_list.append((os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/higher_dimensional_gauss/gauss_data/data_high" +str(dim)+"Dgauss_10000_0.5_0.1_0.0_{0}.txt".format(i),os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/higher_dimensional_gauss/gauss_data/data_high"+str(dim)+"Dgauss_10000_0.5_0.1_0.01_{0}.txt".format(i)))
comp_file_list=[(os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_sin_100_periods_1D_sample_0.txt",os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_sin_99_periods_1D_sample_0.txt")]
#clf = tree.DecisionTreeClassifier('gini','best',37, 89, 1, 0.0, None)
#clf = AdaBoostClassifier(base_estimator=tree.DecisionTreeClassifier(max_depth=2), learning_rate=0.01,n_estimators=983)
clf = SVC(C=496.6,gamma=0.00767,probability=True, cache_size=7000)
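    # The SVC hyperparameters above are hard-coded; judging by the directory name
    # this script evaluates a previously optimised classifier rather than tuning
    # one, and the two np.logspace grids passed via args below appear to belong to
    # the generic classifier_eval interface rather than to a new parameter search.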
args=[str(dim)+ "Dlegendre_100vs99_svm","particle","antiparticle",100,comp_file_list,1,clf,np.logspace(-2, 10, 13),np.logspace(-9, 3, 13),0]
#For nn:
#args=[str(dim)+"Dgauss_nn","particle","antiparticle",100,comp_file_list,1,clf,np.logspace(-2, 10, 13),np.logspace(-9, 3, 13),params['dimof_middle'],params['n_hidden_layers']]
####################################################################
classifier_eval_simplified.classifier_eval(0,0,args)
| mit |
alexandrebarachant/mne-python | tutorials/plot_modifying_data_inplace.py | 1 | 2932 | """
.. _tut_modifying_data_inplace:
Modifying data in-place
=======================
"""
from __future__ import print_function
import mne
import os.path as op
import numpy as np
from matplotlib import pyplot as plt
###############################################################################
# It is often necessary to modify data once you have loaded it into memory.
# Common examples of this are signal processing, feature extraction, and data
# cleaning. Some functionality is pre-built into MNE-python, though it is also
# possible to apply an arbitrary function to the data.
# Load an example dataset, the preload flag loads the data into memory now
data_path = op.join(mne.datasets.sample.data_path(), 'MEG',
'sample', 'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(data_path, preload=True, verbose=False)
raw = raw.crop(0, 10)
print(raw)
###############################################################################
# Signal processing
# -----------------
#
# Most MNE objects have in-built methods for filtering:
filt_bands = [(1, 3), (3, 10), (10, 20), (20, 60)]
f, (ax, ax2) = plt.subplots(2, 1, figsize=(15, 10))
_ = ax.plot(raw._data[0])
for fband in filt_bands:
raw_filt = raw.copy()
raw_filt.filter(*fband, h_trans_bandwidth='auto', l_trans_bandwidth='auto',
filter_length='auto', phase='zero')
_ = ax2.plot(raw_filt[0][0][0])
ax2.legend(filt_bands)
ax.set_title('Raw data')
ax2.set_title('Band-pass filtered data')
###############################################################################
# In addition, there are functions for applying the Hilbert transform, which is
# useful to calculate phase / amplitude of your signal.
# Filter signal with a fairly steep filter, then take hilbert transform
raw_band = raw.copy()
raw_band.filter(12, 18, l_trans_bandwidth=2., h_trans_bandwidth=2.,
filter_length='auto', phase='zero')
raw_hilb = raw_band.copy()
hilb_picks = mne.pick_types(raw_band.info, meg=False, eeg=True)
raw_hilb.apply_hilbert(hilb_picks)
print(raw_hilb._data.dtype)
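# The dtype printed above should be complex: apply_hilbert stores the analytic
# signal, whose magnitude is the amplitude envelope and whose angle is the
# instantaneous phase - exactly what np.abs and np.angle extract further below.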
###############################################################################
# Finally, it is possible to apply arbitrary functions to your data to do
# what you want. Here we will use this to take the amplitude and phase of
# the hilbert transformed data.
#
# .. note:: You can also use ``amplitude=True`` in the call to
# :meth:`mne.io.Raw.apply_hilbert` to do this automatically.
#
# Take the amplitude and phase
raw_amp = raw_hilb.copy()
raw_amp.apply_function(np.abs, hilb_picks, float, 1)
raw_phase = raw_hilb.copy()
raw_phase.apply_function(np.angle, hilb_picks, float, 1)
f, (a1, a2) = plt.subplots(2, 1, figsize=(15, 10))
a1.plot(raw_band._data[hilb_picks[0]])
a1.plot(raw_amp._data[hilb_picks[0]])
a2.plot(raw_phase._data[hilb_picks[0]])
a1.set_title('Amplitude of frequency band')
a2.set_title('Phase of frequency band')
| bsd-3-clause |
khkaminska/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
quheng/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different amount of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different amount of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
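# Note on the comparison below: the '1/n_samples' curves are produced by
# multiplying the grid of C values by the effective training-set size,
# i.e. C_scaled = C * (n_samples * train_size), before plotting the CV scores.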
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
SKravitsky/MachineLearningServer | Deployment/Server_Prediction2.py | 1 | 3170 | import numpy as np
import pandas as pd
import os
import sys
import pydot
import rds_config
import mysql.connector
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.externals.six import StringIO
config = {
'user': 'ECE32',
'password': 'seniordesign',
'host': 'septa-instance.ctejk6luw06s.us-west-2.rds.amazonaws.com',
'database': 'septa',
'raise_on_warnings': True,
}
def get_all_lines(user_id):
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor(dictionary=True)
sql4 = 'SELECT id_weekday, time_departure, id_station_origin, id_station_destination FROM trips WHERE id_user = "%s"' % user_id
sql = 'SELECT id_weekday, time_departure, id_station_origin, id_station_destination FROM trips'
df_mysql = pd.read_sql(sql, con=cnx)
#print df_mysql.dtypes
df_mysql.time_departure = df_mysql.time_departure.astype(int)
#print df_mysql.dtypes
#print df_mysql.head()
return df_mysql
def get_csv():
if os.path.exists("Update.csv"):
df = pd.read_csv("Update.csv")
return df
def scrub_df(data):
#print("* df.head()", data.head())
features = list(data.columns[:3])
targets = list(data.columns[3:])
#print("* features:", features)
#print("* targets:", targets)
X = data[features]
Y = data[targets]
#print("Head", X.tail())
#print("Head2", Y.tail())
return X,Y,features,targets
def prediction_accuracy(F, T, FN, TN):
clf = tree.DecisionTreeClassifier()
F_train, F_test, T_train, T_test = train_test_split(F, T, test_size = .2)
    clf.fit(F_train, T_train)  # fit on the training split only, so the held-out accuracy below is meaningful
predictions = clf.predict(F_test)
print accuracy_score(T_test, predictions)
#tree.export_graphviz(clf, out_file='tree.dot', feature_names=FN, filled=True, rounded=True)
#os.system('dot -Tpng tree.dot -o tree.png')
def prediction(F, T, FN, TN, data):
clf = tree.DecisionTreeClassifier()
clf.fit(F, T)
    df_api = pd.DataFrame([data], columns=['id_weekday', 'time_departure', 'id_station_origin'])  # wrap the tuple in a list so it forms a single row
df_api.time_departure = df_api.time_departure.astype(int)
prediction = clf.predict(df_api)
return prediction
def start_function(user_id, weekday, time, station):
df = get_all_lines(user_id)
features, targets, fnames, tnames = scrub_df(df)
data = (weekday, time, station)
#print features
#prediction_accuracy(features, targets, fnames, tnames)
output_prediction = prediction(features, targets, fnames, tnames, data)
print output_prediction
def lambda_handler(event, context):
user_id = event['key1']
weekday = event['key2']
time = event['key3']
station = event['key4']
start_function(user_id, weekday, time, station)
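# AWS Lambda entry point above: the event payload is expected to carry
# key1..key4 = (user id, weekday, departure time, origin station), which are
# forwarded to start_function to predict the destination station for that trip.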
if __name__ == "__main__":
user_id = 'e2f4uovEeYU'
df = get_all_lines(user_id)
features, targets, fnames, tnames = scrub_df(df)
print features
'''
df2 = get_csv()
features2, targets2, fnames2, tnames2 = scrub_df(df2)
print '----'
print features2
'''
prediction_accuracy(features, targets, fnames, tnames)
| apache-2.0 |
bluerover/6lbr | examples/6lbr/test/postprocessing/pp_plot_router.py | 2 | 26948 | from pylab import *
import re
import math
import inspect
from pp_utils import *
def scatterplot_Router_separate(results):
print "scatterplot_Router_separate"
data = {}
#dictionary data['Sxxxx']['delay'] = {'x':[], 'y',[]}
results = sorted(results, key=lambda k: k.topology)
ncol = 4
nrow = 3
xtitle = "Hop Count"
ytitle = "Reach Delay (s)"
for result in results:
print "%s - %s" % (result.mode,result.id)
if result.mode == "Router":
print "Router!!!"
if result.ping_info != None:
if 'ping1' in result.ping_info and result.ping_info['ping1'] != None:
if 'line' in result.topology:
topo = '-line'
else:
topo = '-other'
if result.id+topo not in data:
data[result.id+topo] = {}
if result.start_delay not in data[result.id+topo]:
data[result.id+topo][result.start_delay] = {'x':[], 'y':[]}
if 'ping2' in result.ping_info and result.ping_info['ping2'] != None:
pingnum = 'ping2'
else:
pingnum = 'ping1'
re_line_topo = re.compile(".*line-([0-9]+)-.*")
if topo == '-line':
data[result.id+topo][result.start_delay]['x'].append(int(re_line_topo.match(result.topology).group(1))-1)
data[result.id+topo][result.start_delay]['y'].append(int(result.time_info[pingnum])/1000)
else:
data[result.id+topo][result.start_delay]['x'].append(64 - int(result.ping_info[pingnum]['ttl']))
data[result.id+topo][result.start_delay]['y'].append(int(result.time_info[pingnum])/1000)
else:
print "No ping1 or none"
else:
print "No ping info"
formatter = matplotlib.ticker.EngFormatter(places=3)
formatter.ENG_PREFIXES[-6] = 'u'
fig100xline = plt.figure(figsize=(25,15)) #figsize=(,)
fig200xline = plt.figure(figsize=(25,15))
index100xline = 1
index110xline = 1
index111xline = 1
fig100xother = plt.figure(figsize=(25,15)) #figsize=(,)
fig200xother = plt.figure(figsize=(25,15))
index100xother = 1
index110xother = 1
index111xother = 1
fig100x = plt.figure(figsize=(25,15)) #figsize=(,)
fig200x = plt.figure(figsize=(25,15))
index100x = 1
index110x = 1
index111x = 1
for testid in data:
sortedid = sorted(data.keys())
# print sortedid
for start_delay in data[testid]:
sorteddelay = sorted(data[testid].keys())
# print sorteddelay
if 'line' in testid:
if 'S100' in testid:
idx = sorteddelay.index(start_delay)*ncol + int(math.ceil(float(sortedid.index(testid))/float(2))) + 1
ax = fig100xline.add_subplot(nrow,ncol,idx, title="%s-%s, %d points" % (testid,start_delay,len(data[testid][start_delay]['x'])), xlim=(0,10), ylim=(0,80), xlabel=xtitle, ylabel=ytitle)
ax.scatter(data[testid][start_delay]['x'],data[testid][start_delay]['y'])
print("plotting %s %s len %d" % (testid, start_delay, len(data[testid][start_delay]['x'])))
#print(index100xline)
#index100xline+=1
if 'S200' in testid:
idx = sorteddelay.index(start_delay)*ncol + int(math.ceil(float(sortedid.index(testid))/float(2)))-4 + 1
ax = fig200xline.add_subplot(nrow,ncol,idx, title="%s-%s, %d points" % (testid,start_delay,len(data[testid][start_delay]['x'])), xlim=(0,10), ylim=(0,80), xlabel=xtitle, ylabel=ytitle)
ax.scatter(data[testid][start_delay]['x'],data[testid][start_delay]['y'])
print("plotting %s %s len %d" % (testid, start_delay, len(data[testid][start_delay]['x'])))
if 'other' in testid:
if 'S100' in testid:
idx = sorteddelay.index(start_delay)*ncol + int(math.ceil(float(sortedid.index(testid))/float(2)))-1 + 1
ax = fig100xother.add_subplot(nrow,ncol,idx, title="%s-%s, %d points" % (testid,start_delay,len(data[testid][start_delay]['x'])), xlim=(0,10), ylim=(0,80), xlabel=xtitle, ylabel=ytitle)
ax.scatter(data[testid][start_delay]['x'],data[testid][start_delay]['y'])
print("plotting %s %s len %d" % (testid, start_delay, len(data[testid][start_delay]['x'])))
#print(index100xother)
#index100xother+=1
if 'S200' in testid:
idx = sorteddelay.index(start_delay)*ncol + int(math.ceil(float(sortedid.index(testid))/float(2)))-1-4 + 1
ax = fig200xother.add_subplot(nrow,ncol,idx, title="%s-%s, %d points" % (testid,start_delay,len(data[testid][start_delay]['x'])), xlim=(0,10), ylim=(0,80), xlabel=xtitle, ylabel=ytitle)
ax.scatter(data[testid][start_delay]['x'],data[testid][start_delay]['y'])
print("plotting %s %s len %d" % (testid, start_delay, len(data[testid][start_delay]['x'])))
#plt.axes().yaxis.set_major_formatter(formatter)
fig100xline.savefig('Router_100x_line.pdf', format='pdf')
fig200xline.savefig('Router_200x_line.pdf', format='pdf')
fig100xother.savefig('Router_100x_other.pdf', format='pdf')
fig200xother.savefig('Router_200x_other.pdf', format='pdf')
def scatterplot_Router(results):
print "scatterplot_Router"
data = {}
#dictionary data['Sxxxx']['delay'] = {'x':[], 'y',[]}
results = sorted(results, key=lambda k: k.topology)
ncol = 4
nrow = 3
xtitle = "Hop Count"
ytitle = "Reach Delay (s)"
for result in results:
if result.mode == "Router":
if result.ping_info != None:
if 'ping1' in result.ping_info or 'ping2' in result.ping_info:
if result.ping_info['ping1'] != None or ('ping2' in result.ping_info and result.ping_info['ping2'] != None):
if result.id not in data:
data[result.id] = {}
if result.start_delay not in data[result.id]:
data[result.id][result.start_delay] = {'x':[], 'y':[]}
if 'ping2' in result.ping_info and result.ping_info['ping2'] != None:
pingnum = 'ping2'
elif result.ping_info['ping1'] != None:
pingnum = 'ping1'
else:
continue
re_line_topo = re.compile(".*line-([0-9]+)-.*")
if 'line' in result.topology:
data[result.id][result.start_delay]['x'].append(int(re_line_topo.match(result.topology).group(1))-1)
data[result.id][result.start_delay]['y'].append(int(result.time_info[pingnum])/1000)
else:
data[result.id][result.start_delay]['x'].append(64 - int(result.ping_info[pingnum]['ttl']))
data[result.id][result.start_delay]['y'].append(int(result.time_info[pingnum])/1000)
fig100x = plt.figure(figsize=(25,15)) #figsize=(,)
fig200x = plt.figure(figsize=(25,15))
index100x = 1
for testid in data:
sortedid = sorted(data.keys())
# print sortedid
for start_delay in sorted(data[testid].keys()):
sorteddelay = sorted(data[testid].keys())
# print sorteddelay
if 'S100' in testid:
idx = sorteddelay.index(start_delay)*ncol + sortedid.index(testid) + 1
ax = fig100x.add_subplot(nrow,ncol,idx, title="%s-%s, %d points" % (testid,start_delay,len(data[testid][start_delay]['x'])), xlim=(0,10), ylim=(0,80), xlabel=xtitle, ylabel=ytitle)
ax.scatter(data[testid][start_delay]['x'],data[testid][start_delay]['y'])
print("plotting %s %s len %d" % (testid, start_delay, len(data[testid][start_delay]['x'])))
#print(index100x)
#index100x+=1
if 'S200' in testid:
idx = sorteddelay.index(start_delay)*ncol + sortedid.index(testid)-4 + 1
ax = fig200x.add_subplot(nrow,ncol,idx, title="%s-%s, %d points" % (testid,start_delay,len(data[testid][start_delay]['x'])), xlim=(0,10), ylim=(0,80), xlabel=xtitle, ylabel=ytitle)
ax.scatter(data[testid][start_delay]['x'],data[testid][start_delay]['y'])
print("plotting %s %s len %d" % (testid, start_delay, len(data[testid][start_delay]['x'])))
#plt.axes().yaxis.set_major_formatter(formatter)
fig100x.savefig('Router_100x.pdf', format='pdf')
fig200x.savefig('Router_200x.pdf', format='pdf')
def scatterplot_Router_mean(results):
print "scatterplot_Router_mean"
data = {}
#dictionary data['Sxxxx']['delay'] = {'x':[], 'y',[]}
results = sorted(results, key=lambda k: k.topology)
ncol = 4
nrow = 3
lonelynesslevel = 1
xtitle = "Hop Count"
ytitle = "Reach Delay (s)"
for result in results:
if result.mode == "SmartBridgeAuto":
if result.ping_info != None:
if 'ping1' in result.ping_info and result.ping_info['ping1'] != None:
if result.id not in data:
data[result.id] = {}
if result.start_delay not in data[result.id]:
data[result.id][result.start_delay] = {'x':[], 'y':[], 'xmean':[], 'ymean':[], 'ystd':[]}
if 'ping2' in result.ping_info and result.ping_info['ping2'] != None:
pingnum = 'ping2'
else:
pingnum = 'ping1'
re_line_topo = re.compile(".*line-([0-9]+)-.*")
if 'line' in result.topology:
data[result.id][result.start_delay]['x'].append(int(re_line_topo.match(result.topology).group(1))-1)
data[result.id][result.start_delay]['y'].append(int(result.time_info[pingnum])/1000)
else:
data[result.id][result.start_delay]['x'].append(64 - int(result.ping_info[pingnum]['ttl']))
data[result.id][result.start_delay]['y'].append(int(result.time_info[pingnum])/1000)
figmean100x = plt.figure(figsize=(25,15))
figmean110x = plt.figure(figsize=(25,15))
figmean111x = plt.figure(figsize=(25,15))
figmean200x = plt.figure(figsize=(25,15))
indexmean100x = 0
indexmean110x = 0
indexmean111x = 0
indexmean200x = 0
print " mean"
plotcolor = 'r'
plotmarker = 'o'
plotline = '-'
for testid in sorted(data.keys()):
if 'S100' in testid:
indexmean100x += 1
ax = figmean100x.add_subplot(nrow,ncol,indexmean100x, title="Mean values %s, all delays" % (testid,), xlim=(0,10), ylim=(0,65), xlabel=xtitle, ylabel=ytitle)
for start_delay in sorted(data[testid].keys()):
data[testid][start_delay]['xmean'] = sorted(unique(data[testid][start_delay]['x']))
temp = [[] for i in range(len(data[testid][start_delay]['xmean']))]
for k in range(len(data[testid][start_delay]['x'])):
temp[data[testid][start_delay]['xmean'].index(data[testid][start_delay]['x'][k])].append(data[testid][start_delay]['y'][k])
for i in range(len(temp)):
data[testid][start_delay]['ymean'].append(mean(temp[i]).tolist())
data[testid][start_delay]['ystd'].append(std(temp[i]).tolist())
if sorted(data[testid].keys()).index(start_delay) == 0:
plotcolor = 'b'
plotmarker = 'x'
elif sorted(data[testid].keys()).index(start_delay) == 1:
plotcolor = 'g'
plotmarker = 'o'
else:
plotcolor = 'r'
plotmarker = '^'
pruned = prunevalues(data[testid][start_delay]['xmean'],data[testid][start_delay]['ymean'],data[testid][start_delay]['ystd'], lonelyness=lonelynesslevel)
ax.plot(pruned['x'],pruned['y'], label="DAG delay %ds"%int(start_delay), linestyle=plotline, marker=plotmarker, color=plotcolor)
ax.errorbar(pruned['x'],pruned['y'], pruned['z'], fmt='-', color=plotcolor)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1])
# ax.plot(data[testid][start_delay]['xmean'],data[testid][start_delay]['ymean'], label="DAG delay %ds"%int(start_delay), linestyle=plotline, marker=plotmarker, color=plotcolor)
# ax.errorbar(data[testid][start_delay]['xmean'], data[testid][start_delay]['ymean'], data[testid][start_delay]['ystd'], fmt='-', color=plotcolor)
# print("plotting mean %s %s len %d" % (testid, start_delay, len(data[testid][start_delay]['xmean'])))
if 'S200' in testid:
indexmean200x += 1
ax = figmean200x.add_subplot(nrow,ncol,indexmean200x, title="Mean values %s, all delays" % (testid,), xlim=(0,10), ylim=(0,65), xlabel=xtitle, ylabel=ytitle)
for start_delay in sorted(data[testid].keys()):
data[testid][start_delay]['xmean'] = sorted(unique(data[testid][start_delay]['x']))
temp = [[] for i in range(len(data[testid][start_delay]['xmean']))]
for k in range(len(data[testid][start_delay]['x'])):
temp[data[testid][start_delay]['xmean'].index(data[testid][start_delay]['x'][k])].append(data[testid][start_delay]['y'][k])
for i in range(len(temp)):
data[testid][start_delay]['ymean'].append(mean(temp[i]).tolist())
data[testid][start_delay]['ystd'].append(std(temp[i]).tolist())
if sorted(data[testid].keys()).index(start_delay) == 0:
plotcolor = 'b'
plotmarker = 'x'
elif sorted(data[testid].keys()).index(start_delay) == 1:
plotcolor = 'g'
plotmarker = 'o'
else:
plotcolor = 'r'
plotmarker = '^'
pruned = prunevalues(data[testid][start_delay]['xmean'],data[testid][start_delay]['ymean'],data[testid][start_delay]['ystd'], lonelyness=lonelynesslevel)
ax.plot(pruned['x'],pruned['y'], label="DAG delay %ds"%int(start_delay), linestyle=plotline, marker=plotmarker, color=plotcolor)
ax.errorbar(pruned['x'],pruned['y'], pruned['z'], fmt='-', color=plotcolor)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1])
# ax.plot(data[testid][start_delay]['xmean'],data[testid][start_delay]['ymean'], label="DAG delay %ds"%int(start_delay), linestyle=plotline, marker=plotmarker, color=plotcolor)
# ax.errorbar(data[testid][start_delay]['xmean'], data[testid][start_delay]['ymean'], data[testid][start_delay]['ystd'], fmt='-', color=plotcolor)
# print("plotting mean %s %s len %d" % (testid, start_delay, len(data[testid][start_delay]['xmean'])))
figmean100x.savefig('Router_mean100x.pdf', format='pdf')
figmean200x.savefig('Router_mean200x.pdf', format='pdf')
figmeandelay100x = plt.figure(figsize=(25,15))
figmeandelay110x = plt.figure(figsize=(25,15))
figmeandelay111x = plt.figure(figsize=(25,15))
figmeandelay200x = plt.figure(figsize=(25,15))
figmeantraffic100x = plt.figure(figsize=(25,15))
figmeantraffic110x = plt.figure(figsize=(25,15))
figmeantraffic111x = plt.figure(figsize=(25,15))
figmeantraffic200x = plt.figure(figsize=(25,15))
#Prepare all the data
alldata = {}
for testid in sorted(data.keys()):
if not alldata.has_key(testid[:-1]):
alldata[testid[:-1]] = {}
if not alldata[testid[:-1]].has_key('x'+testid[-1:]):
alldata[testid[:-1]]['x'+testid[-1:]] = {}
for start_delay in sorted(data[testid].keys()):
if not alldata[testid[:-1]]['x'+testid[-1:]].has_key(start_delay):
alldata[testid[:-1]]['x'+testid[-1:]][start_delay] = {}
alldata[testid[:-1]]['x'+testid[-1:]][start_delay]["x"] = data[testid][start_delay]['xmean']
alldata[testid[:-1]]['x'+testid[-1:]][start_delay]["ymean"] = data[testid][start_delay]['ymean']
datadelay = {}
datatraffic = {}
for testclass in sorted(alldata.keys()):
if not datadelay.has_key(testclass):
datadelay[testclass] = {}
if not datatraffic.has_key(testclass):
datatraffic[testclass] = {}
for traffic in sorted(alldata[testclass].keys()):
for start_delay in sorted(alldata[testclass][traffic].keys()):
if not datadelay[testclass].has_key(start_delay):
datadelay[testclass][start_delay] = {}
if not datatraffic[testclass].has_key(traffic):
datatraffic[testclass][traffic] = {}
if not datadelay[testclass][start_delay].has_key("x") or not datadelay[testclass][start_delay].has_key("y"):
tmp = mergevector(None,None,alldata[testclass][traffic][start_delay]["x"],alldata[testclass][traffic][start_delay]["ymean"])
else:
tmp = mergevector(datadelay[testclass][start_delay]["x"],datadelay[testclass][start_delay]["y"],alldata[testclass][traffic][start_delay]["x"],alldata[testclass][traffic][start_delay]["ymean"])
datadelay[testclass][start_delay]["x"] = tmp["x"]
datadelay[testclass][start_delay]["y"] = tmp["y"]
if not datatraffic[testclass][traffic].has_key("x") or not datatraffic[testclass][traffic].has_key("y"):
tmp = mergevector(None,None,alldata[testclass][traffic][start_delay]["x"],alldata[testclass][traffic][start_delay]["ymean"])
else:
tmp = mergevector(datatraffic[testclass][traffic]["x"],datatraffic[testclass][traffic]["y"],alldata[testclass][traffic][start_delay]["x"],alldata[testclass][traffic][start_delay]["ymean"])
datatraffic[testclass][traffic]["x"] = tmp["x"]
datatraffic[testclass][traffic]["y"] = tmp["y"]
indexmean100x = 0
indexmean110x = 0
indexmean111x = 0
indexmean200x = 0
print " meandelay"
for testclass in sorted(datadelay.keys()):
if 'S100' in testclass:
indexmean100x += 1
ax = figmeandelay100x.add_subplot(nrow,ncol,indexmean100x, title="Mean values %s, mixed traffic by delay" % (testclass,), xlim=(0,10), ylim=(0,65), xlabel=xtitle, ylabel=ytitle)
for start_delay in sorted(datadelay[testclass].keys()):
if sorted(datadelay[testclass].keys()).index(start_delay) == 0:
plotcolor = 'b'
plotmarker = 'x'
elif sorted(datadelay[testclass].keys()).index(start_delay) == 1:
plotcolor = 'g'
plotmarker = 'o'
else:
plotcolor = 'r'
plotmarker = '^'
pruned = prunevalues(datadelay[testclass][start_delay]['x'],datadelay[testclass][start_delay]['y'], lonelyness=lonelynesslevel)
ax.plot(pruned['x'],mean(pruned['y'],0), label="DAG delay %ds"%int(start_delay), linestyle=plotline, marker=plotmarker, color=plotcolor)
ax.errorbar(pruned['x'], mean(pruned['y'],0), std(pruned['y'],0), fmt='-', color=plotcolor)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1])
# ax.plot(datadelay[testclass][start_delay]['x'],mean(datadelay[testclass][start_delay]['y'],0), label="DAG delay %ds"%int(start_delay), linestyle=plotline, marker=plotmarker, color=plotcolor)
# ax.errorbar(datadelay[testclass][start_delay]['x'], mean(datadelay[testclass][start_delay]['y'],0), std(datadelay[testclass][start_delay]['y'],0), fmt='-', color=plotcolor)
if 'S200' in testclass:
indexmean200x += 1
ax = figmeandelay200x.add_subplot(nrow,ncol,indexmean200x, title="Mean values %s, mixed traffic by delay" % (testclass,), xlim=(0,10), ylim=(0,65), xlabel=xtitle, ylabel=ytitle)
for start_delay in sorted(datadelay[testclass].keys()):
if sorted(datadelay[testclass].keys()).index(start_delay) == 0:
plotcolor = 'b'
plotmarker = 'x'
elif sorted(datadelay[testclass].keys()).index(start_delay) == 1:
plotcolor = 'g'
plotmarker = 'o'
else:
plotcolor = 'r'
plotmarker = '^'
pruned = prunevalues(datadelay[testclass][start_delay]['x'],datadelay[testclass][start_delay]['y'], lonelyness=lonelynesslevel)
ax.plot(pruned['x'],mean(pruned['y'],0), label="DAG delay %ds"%int(start_delay), linestyle=plotline, marker=plotmarker, color=plotcolor)
ax.errorbar(pruned['x'], mean(pruned['y'],0), std(pruned['y'],0), fmt='-', color=plotcolor)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1])
# ax.plot(datadelay[testclass][start_delay]['x'],mean(datadelay[testclass][start_delay]['y'],0), label="DAG delay %ds"%int(start_delay), linestyle=plotline, marker=plotmarker, color=plotcolor)
# ax.errorbar(datadelay[testclass][start_delay]['x'], mean(datadelay[testclass][start_delay]['y'],0), std(datadelay[testclass][start_delay]['y'],0), fmt='-', color=plotcolor)
figmeandelay100x.savefig('Router_meandelay100x.pdf', format='pdf')
figmeandelay200x.savefig('Router_meandelay200x.pdf', format='pdf')
indexmean100x = 0
indexmean200x = 0
print " meantraffic"
for testclass in sorted(datatraffic.keys()):
if 'S100' in testclass:
indexmean100x += 1
ax = figmeantraffic100x.add_subplot(nrow,ncol,indexmean100x, title="Mean values %s, mixed delay by traffic" % (testclass,), xlim=(0,10), ylim=(0,65), xlabel=xtitle, ylabel=ytitle)
for traffic in sorted(datatraffic[testclass].keys()):
if sorted(datatraffic[testclass].keys()).index(traffic) == 0:
plotcolor = 'b'
plotmarker = 'x'
plotlabel = 'No extra traffic'
elif sorted(datatraffic[testclass].keys()).index(traffic) == 1:
plotcolor = 'g'
plotmarker = 'o'
plotlabel = 'Self UDP collect traffic'
elif sorted(datatraffic[testclass].keys()).index(traffic) == 2:
plotcolor = 'y'
plotmarker = 's'
plotlabel = 'All-node UDP collect traffic'
else:
plotcolor = 'r'
plotmarker = '^'
plotlabel = 'All-node UDP echo traffic'
pruned = prunevalues(datatraffic[testclass][traffic]['x'],datatraffic[testclass][traffic]['y'], lonelyness=lonelynesslevel)
ax.plot(pruned['x'],mean(pruned['y'],0), label=plotlabel, linestyle=plotline, marker=plotmarker, color=plotcolor)
ax.errorbar(pruned['x'], mean(pruned['y'],0), std(pruned['y'],0), fmt='-', color=plotcolor)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1])
# ax.plot(datatraffic[testclass][traffic]['x'],mean(datatraffic[testclass][traffic]['y'],0), label="Traffic %s"%traffic, linestyle=plotline, marker=plotmarker, color=plotcolor)
# ax.errorbar(datatraffic[testclass][traffic]['x'], mean(datatraffic[testclass][traffic]['y'],0), std(datatraffic[testclass][traffic]['y'],0), fmt='-', color=plotcolor)
if 'S200' in testclass:
indexmean200x += 1
ax = figmeantraffic200x.add_subplot(nrow,ncol,indexmean200x, title="Mean values %s, mixed delay by traffic" % (testclass,), xlim=(0,10), ylim=(0,65), xlabel=xtitle, ylabel=ytitle)
for traffic in sorted(datatraffic[testclass].keys()):
if sorted(datatraffic[testclass].keys()).index(traffic) == 0:
plotcolor = 'b'
plotmarker = 'x'
plotlabel = 'No extra traffic'
elif sorted(datatraffic[testclass].keys()).index(traffic) == 1:
plotcolor = 'g'
plotmarker = 'o'
plotlabel = 'Self UDP collect traffic'
elif sorted(datatraffic[testclass].keys()).index(traffic) == 2:
plotcolor = 'y'
plotmarker = 's'
plotlabel = 'All-node UDP collect traffic'
else:
plotcolor = 'r'
plotmarker = '^'
plotlabel = 'All-node UDP echo traffic'
pruned = prunevalues(datatraffic[testclass][traffic]['x'],datatraffic[testclass][traffic]['y'], lonelyness=lonelynesslevel)
ax.plot(pruned['x'],mean(pruned['y'],0), label=plotlabel, linestyle=plotline, marker=plotmarker, color=plotcolor)
ax.errorbar(pruned['x'], mean(pruned['y'],0), std(pruned['y'],0), fmt='-', color=plotcolor)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1])
# ax.plot(datatraffic[testclass][traffic]['x'],mean(datatraffic[testclass][traffic]['y'],0), label="Traffic %s"%traffic, linestyle=plotline, marker=plotmarker, color=plotcolor)
# ax.errorbar(datatraffic[testclass][traffic]['x'], mean(datatraffic[testclass][traffic]['y'],0), std(datatraffic[testclass][traffic]['y'],0), fmt='-', color=plotcolor)
figmeantraffic100x.savefig('Router_meantraffic100x.pdf', format='pdf')
figmeantraffic200x.savefig('Router_meantraffic200x.pdf', format='pdf')
| bsd-3-clause |
Razvy000/ANN-Intro | test_ann.py | 1 | 4712 | from __future__ import print_function
import unittest
from ann import ANN
class TestANN(unittest.TestCase):
    def disabled(f):
        def _decorator(*args, **kwargs):
            print(f.__name__ + ' has been disabled')
        return _decorator
@disabled
    def test_xor_training(self):
        print("test_xor_training...")
nn = ANN([2, 2, 1])
inputs = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
targets = [[0.0], [1.0], [1.0], [0.0]]
predicts = []
# train
nn.train(40000, inputs, targets)
for i in range(len(targets)):
predicts.append(nn.predict(inputs[i]))
# the prediction for 0,0 and 1,1 should be less than prediction for 0,1 and 1,0
self.assertTrue(predicts[0] < predicts[1], 'xor relation1 not learned')
self.assertTrue(predicts[0] < predicts[2], 'xor relation2 not learned')
self.assertTrue(predicts[3] < predicts[1], 'xor relation3 not learned')
self.assertTrue(predicts[3] < predicts[2], 'xor relation4 not learned')
def test_mnist_28by28(self):
import time
import os
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_digits
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelBinarizer
from ann import ANN
# load lecun mnist dataset
X = []
y = []
with open('data/mnist_test_data.txt', 'r') as fd, open('data/mnist_test_label.txt', 'r') as fl:
for line in fd:
img = line.split()
pixels = [int(pixel) for pixel in img]
X.append(pixels)
for line in fl:
pixel = int(line)
y.append(pixel)
X = np.array(X, np.float)
y = np.array(y, np.float)
# normalize input into [0, 1]
X -= X.min()
X /= X.max()
# quick test
#X = X[:1000]
#y = y[:1000]
# for my network
X_test = X
y_test = y #LabelBinarizer().fit_transform(y)
nn = ANN([1,1])
nn = nn.deserialize('28_200000.pickle') # '28_100000.pickle'
predictions = []
for i in range(X_test.shape[0]):
o = nn.predict(X_test[i])
predictions.append(np.argmax(o))
# compute a confusion matrix
print("confusion matrix")
print(confusion_matrix(y_test, predictions))
# show a classification report
print("classification report")
print(classification_report(y_test, predictions))
@disabled
def test_mnist_8by8_training(self):
print("test_mnist_8by8_training")
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_digits
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import precision_score, recall_score
# import the simplified mnist dataset from scikit learn
digits = load_digits()
# get the input vectors (X is a vector of vectors of type int)
X = digits.data
# get the output vector ( y is a vector of type int)
y = digits.target
# normalize input into [0, 1]
X -= X.min()
X /= X.max()
# split data into training and testing 75% of examples are used for training and 25% are used for testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=123)
# binarize the labels from a number into a vector with a 1 at that index
labels_train = LabelBinarizer().fit_transform(y_train)
labels_test = LabelBinarizer().fit_transform(y_test)
# convert from numpy to normal python list for our simple implementation
X_train_l = X_train.tolist()
labels_train_l = labels_train.tolist()
# create the artificial neuron network with:
# 1 input layer of size 64 (the images are 8x8 gray pixels)
# 1 hidden layer of size 100
# 1 output layer of size 10 (the labels of digits are 0 to 9)
nn = ANN([64, 100, 10])
# see how long training takes
startTime = time.time()
# train it
nn.train(10, X_train_l, labels_train_l)
elapsedTime = time.time() - startTime
print("time took " + str(elapsedTime))
self.assertTrue(elapsedTime < 300, 'Training took more than 300 seconds')
# compute the predictions
predictions = []
for i in range(X_test.shape[0]):
o = nn.predict(X_test[i])
predictions.append(np.argmax(o))
# compute a confusion matrix
# print(confusion_matrix(y_test, predictions))
# print(classification_report(y_test, predictions))
precision = precision_score(y_test, predictions, average='macro')
print("precision", precision)
recall = recall_score(y_test, predictions, average='macro')
print("recall", recall)
self.assertTrue(precision > 0.93, 'Precision must be bigger than 93%')
self.assertTrue(recall > 0.93, 'Recall must be bigger than 93%')
if __name__ == '__main__':
unittest.main()
| mit |
amondot/QGIS | python/plugins/processing/algs/qgis/QGISAlgorithmProvider.py | 5 | 9868 | # -*- coding: utf-8 -*-
"""
***************************************************************************
QGISAlgorithmProvider.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
try:
import matplotlib.pyplot
hasMatplotlib = True
except ImportError:
hasMatplotlib = False
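# hasMatplotlib records whether matplotlib could be imported; presumably it is
# used later in this provider to register plotting-based algorithms only when
# matplotlib is available.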
from PyQt4.QtGui import QIcon
from processing.core.AlgorithmProvider import AlgorithmProvider
from processing.script.ScriptUtils import ScriptUtils
from RegularPoints import RegularPoints
from SymmetricalDifference import SymmetricalDifference
from VectorSplit import VectorSplit
from VectorGrid import VectorGrid
from RandomExtract import RandomExtract
from RandomExtractWithinSubsets import RandomExtractWithinSubsets
from ExtractByLocation import ExtractByLocation
from PointsInPolygon import PointsInPolygon
from PointsInPolygonUnique import PointsInPolygonUnique
from PointsInPolygonWeighted import PointsInPolygonWeighted
from SumLines import SumLines
from BasicStatisticsNumbers import BasicStatisticsNumbers
from BasicStatisticsStrings import BasicStatisticsStrings
from NearestNeighbourAnalysis import NearestNeighbourAnalysis
from LinesIntersection import LinesIntersection
from MeanCoords import MeanCoords
from PointDistance import PointDistance
from UniqueValues import UniqueValues
from ReprojectLayer import ReprojectLayer
from ExportGeometryInfo import ExportGeometryInfo
from Centroids import Centroids
from Delaunay import Delaunay
from VoronoiPolygons import VoronoiPolygons
from DensifyGeometries import DensifyGeometries
from MultipartToSingleparts import MultipartToSingleparts
from SimplifyGeometries import SimplifyGeometries
from LinesToPolygons import LinesToPolygons
from PolygonsToLines import PolygonsToLines
from SinglePartsToMultiparts import SinglePartsToMultiparts
from ExtractNodes import ExtractNodes
from ConvexHull import ConvexHull
from FixedDistanceBuffer import FixedDistanceBuffer
from VariableDistanceBuffer import VariableDistanceBuffer
from Clip import Clip
from Difference import Difference
from Dissolve import Dissolve
from Intersection import Intersection
from ExtentFromLayer import ExtentFromLayer
from RandomSelection import RandomSelection
from RandomSelectionWithinSubsets import RandomSelectionWithinSubsets
from SelectByLocation import SelectByLocation
from Union import Union
from DensifyGeometriesInterval import DensifyGeometriesInterval
from Eliminate import Eliminate
from SpatialJoin import SpatialJoin
from DeleteColumn import DeleteColumn
from DeleteHoles import DeleteHoles
from DeleteDuplicateGeometries import DeleteDuplicateGeometries
from TextToFloat import TextToFloat
from ExtractByAttribute import ExtractByAttribute
from SelectByAttribute import SelectByAttribute
from Grid import Grid
from Gridify import Gridify
from HubDistance import HubDistance
from HubLines import HubLines
from Merge import Merge
from GeometryConvert import GeometryConvert
from ConcaveHull import ConcaveHull
from Polygonize import Polygonize
from RasterLayerStatistics import RasterLayerStatistics
from StatisticsByCategories import StatisticsByCategories
from EquivalentNumField import EquivalentNumField
from AddTableField import AddTableField
from FieldsCalculator import FieldsCalculator
from SaveSelectedFeatures import SaveSelectedFeatures
from Explode import Explode
from AutoincrementalField import AutoincrementalField
from FieldPyculator import FieldsPyculator
from JoinAttributes import JoinAttributes
from CreateConstantRaster import CreateConstantRaster
from PointsLayerFromTable import PointsLayerFromTable
from PointsDisplacement import PointsDisplacement
from ZonalStatistics import ZonalStatistics
from PointsFromPolygons import PointsFromPolygons
from PointsFromLines import PointsFromLines
from RandomPointsExtent import RandomPointsExtent
from RandomPointsLayer import RandomPointsLayer
from RandomPointsPolygonsFixed import RandomPointsPolygonsFixed
from RandomPointsPolygonsVariable import RandomPointsPolygonsVariable
from RandomPointsAlongLines import RandomPointsAlongLines
from PointsToPaths import PointsToPaths
from PostGISExecuteSQL import PostGISExecuteSQL
from ImportIntoPostGIS import ImportIntoPostGIS
from SetVectorStyle import SetVectorStyle
from SetRasterStyle import SetRasterStyle
from SelectByExpression import SelectByExpression
from SelectByAttributeSum import SelectByAttributeSum
from HypsometricCurves import HypsometricCurves
from SplitLinesWithLines import SplitLinesWithLines
from FieldsMapper import FieldsMapper
from Datasources2Vrt import Datasources2Vrt
from CheckValidity import CheckValidity
pluginPath = os.path.normpath(os.path.join(
os.path.split(os.path.dirname(__file__))[0], os.pardir))
class QGISAlgorithmProvider(AlgorithmProvider):
_icon = QIcon(os.path.join(pluginPath, 'images', 'qgis.png'))
def __init__(self):
AlgorithmProvider.__init__(self)
self.alglist = [SumLines(), PointsInPolygon(),
PointsInPolygonWeighted(), PointsInPolygonUnique(),
BasicStatisticsStrings(), BasicStatisticsNumbers(),
NearestNeighbourAnalysis(), MeanCoords(),
LinesIntersection(), UniqueValues(), PointDistance(),
ReprojectLayer(), ExportGeometryInfo(), Centroids(),
Delaunay(), VoronoiPolygons(), SimplifyGeometries(),
DensifyGeometries(), DensifyGeometriesInterval(),
MultipartToSingleparts(), SinglePartsToMultiparts(),
PolygonsToLines(), LinesToPolygons(), ExtractNodes(),
Eliminate(), ConvexHull(), FixedDistanceBuffer(),
VariableDistanceBuffer(), Dissolve(), Difference(),
Intersection(), Union(), Clip(), ExtentFromLayer(),
RandomSelection(), RandomSelectionWithinSubsets(),
SelectByLocation(), RandomExtract(), DeleteHoles(),
RandomExtractWithinSubsets(), ExtractByLocation(),
SpatialJoin(), RegularPoints(), SymmetricalDifference(),
VectorSplit(), VectorGrid(), DeleteColumn(),
DeleteDuplicateGeometries(), TextToFloat(),
ExtractByAttribute(), SelectByAttribute(), Grid(),
Gridify(), HubDistance(), HubLines(), Merge(),
GeometryConvert(), AddTableField(), FieldsCalculator(),
SaveSelectedFeatures(), JoinAttributes(),
AutoincrementalField(), Explode(), FieldsPyculator(),
EquivalentNumField(), PointsLayerFromTable(),
StatisticsByCategories(), ConcaveHull(), Polygonize(),
RasterLayerStatistics(), PointsDisplacement(),
ZonalStatistics(), PointsFromPolygons(),
PointsFromLines(), RandomPointsExtent(),
RandomPointsLayer(), RandomPointsPolygonsFixed(),
RandomPointsPolygonsVariable(),
RandomPointsAlongLines(), PointsToPaths(),
PostGISExecuteSQL(), ImportIntoPostGIS(),
SetVectorStyle(), SetRasterStyle(),
SelectByExpression(), HypsometricCurves(),
SplitLinesWithLines(), CreateConstantRaster(),
FieldsMapper(),SelectByAttributeSum(), Datasources2Vrt(),
CheckValidity()
]
if hasMatplotlib:
from VectorLayerHistogram import VectorLayerHistogram
from RasterLayerHistogram import RasterLayerHistogram
from VectorLayerScatterplot import VectorLayerScatterplot
from MeanAndStdDevPlot import MeanAndStdDevPlot
from BarPlot import BarPlot
from PolarPlot import PolarPlot
self.alglist.extend([
VectorLayerHistogram(), RasterLayerHistogram(),
VectorLayerScatterplot(), MeanAndStdDevPlot(), BarPlot(),
PolarPlot(),
])
folder = os.path.join(os.path.dirname(__file__), 'scripts')
scripts = ScriptUtils.loadFromFolder(folder)
for script in scripts:
script.allowEdit = False
self.alglist.extend(scripts)
for alg in self.alglist:
alg._icon = self._icon
def initializeSettings(self):
AlgorithmProvider.initializeSettings(self)
def unload(self):
AlgorithmProvider.unload(self)
def getName(self):
return 'qgis'
def getDescription(self):
return self.tr('QGIS geoalgorithms')
def getIcon(self):
return self._icon
def _loadAlgorithms(self):
self.algs = self.alglist
def supportsNonFileBasedOutput(self):
return True
| gpl-2.0 |
eadgarchen/tensorflow | tensorflow/contrib/timeseries/examples/lstm.py | 9 | 9321 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A more advanced example, of building an RNN-based time series model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import numpy
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import model as ts_model
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_periods.csv")
class _LSTMModel(ts_model.SequentialTimeSeriesModel):
"""A time series model-building example using an RNNCell."""
def __init__(self, num_units, num_features, dtype=tf.float32):
"""Initialize/configure the model object.
Note that we do not start graph building here. Rather, this object is a
configurable factory for TensorFlow graphs which are run by an Estimator.
Args:
num_units: The number of units in the model's LSTMCell.
num_features: The dimensionality of the time series (features per
timestep).
dtype: The floating point data type to use.
"""
super(_LSTMModel, self).__init__(
# Pre-register the metrics we'll be outputting (just a mean here).
train_output_names=["mean"],
predict_output_names=["mean"],
num_features=num_features,
dtype=dtype)
self._num_units = num_units
# Filled in by initialize_graph()
self._lstm_cell = None
self._lstm_cell_run = None
self._predict_from_lstm_output = None
def initialize_graph(self, input_statistics):
"""Save templates for components, which can then be used repeatedly.
This method is called every time a new graph is created. It's safe to start
adding ops to the current default graph here, but the graph should be
constructed from scratch.
Args:
input_statistics: A math_utils.InputStatistics object.
"""
super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
# Create templates so we don't have to worry about variable reuse.
self._lstm_cell_run = tf.make_template(
name_="lstm_cell",
func_=self._lstm_cell,
create_scope_now_=True)
# Transforms LSTM output into mean predictions.
self._predict_from_lstm_output = tf.make_template(
name_="predict_from_lstm_output",
func_=
lambda inputs: tf.layers.dense(inputs=inputs, units=self.num_features),
create_scope_now_=True)
def get_start_state(self):
"""Return initial state for the time series model."""
return (
# Keeps track of the time associated with this state for error checking.
tf.zeros([], dtype=tf.int64),
# The previous observation or prediction.
tf.zeros([self.num_features], dtype=self.dtype),
# The state of the RNNCell (batch dimension removed since this parent
# class will broadcast).
[tf.squeeze(state_element, axis=0)
for state_element
in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])
def _filtering_step(self, current_times, current_values, state, predictions):
"""Update model state based on observations.
Note that we don't do much here aside from computing a loss. In this case
it's easier to update the RNN state in _prediction_step, since that covers
running the RNN both on observations (from this method) and our own
predictions. This distinction can be important for probabilistic models,
where repeatedly predicting without filtering should lead to low-confidence
predictions.
Args:
current_times: A [batch size] integer Tensor.
current_values: A [batch size, self.num_features] floating point Tensor
with new observations.
state: The model's state tuple.
predictions: The output of the previous `_prediction_step`.
Returns:
A tuple of new state and a predictions dictionary updated to include a
loss (note that we could also return other measures of goodness of fit,
although only "loss" will be optimized).
"""
state_from_time, prediction, lstm_state = state
with tf.control_dependencies(
[tf.assert_equal(current_times, state_from_time)]):
# Subtract the mean and divide by the variance of the series. Slightly
# more efficient if done for a whole window (using the normalize_features
# argument to SequentialTimeSeriesModel).
transformed_values = self._scale_data(current_values)
# Use mean squared error across features for the loss.
predictions["loss"] = tf.reduce_mean(
(prediction - transformed_values) ** 2, axis=-1)
# Keep track of the new observation in model state. It won't be run
# through the LSTM until the next _imputation_step.
new_state_tuple = (current_times, transformed_values, lstm_state)
return (new_state_tuple, predictions)
def _prediction_step(self, current_times, state):
"""Advance the RNN state using a previous observation or prediction."""
_, previous_observation_or_prediction, lstm_state = state
lstm_output, new_lstm_state = self._lstm_cell_run(
inputs=previous_observation_or_prediction, state=lstm_state)
next_prediction = self._predict_from_lstm_output(lstm_output)
new_state_tuple = (current_times, next_prediction, new_lstm_state)
return new_state_tuple, {"mean": self._scale_back_data(next_prediction)}
def _imputation_step(self, current_times, state):
"""Advance model state across a gap."""
# Does not do anything special if we're jumping across a gap. More advanced
# models, especially probabilistic ones, would want a special case that
# depends on the gap size.
return state
def _exogenous_input_step(
self, current_times, current_exogenous_regressors, state):
"""Update model state based on exogenous regressors."""
raise NotImplementedError(
"Exogenous inputs are not implemented for this example.")
def train_and_predict(
csv_file_name=_DATA_FILE, training_steps=200, estimator_config=None):
"""Train and predict using a custom time series model."""
# Construct an Estimator from our LSTM model.
estimator = ts_estimators.TimeSeriesRegressor(
model=_LSTMModel(num_features=5, num_units=128),
optimizer=tf.train.AdamOptimizer(0.001), config=estimator_config)
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=4, window_size=32)
estimator.train(input_fn=train_input_fn, steps=training_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, :]
predicted_mean = numpy.squeeze(numpy.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
all_times = numpy.concatenate([times, predictions["times"]], axis=0)
return times, observed, all_times, predicted_mean
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
(observed_times, observations,
all_times, predictions) = train_and_predict()
pyplot.axvline(99, linestyle="dotted")
observed_lines = pyplot.plot(
observed_times, observations, label="Observed", color="k")
predicted_lines = pyplot.plot(
all_times, predictions, label="Predicted", color="b")
pyplot.legend(handles=[observed_lines[0], predicted_lines[0]],
loc="upper left")
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
| apache-2.0 |
fabriziocosta/eden_rna | eden_rna/RNAFolder.py | 1 | 9276 | #!/usr/bin/env python
import subprocess as sp
from itertools import tee
import numpy as np
import random
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics.pairwise import pairwise_kernels
from eden.sequence import Vectorizer as SeqVectorizer
from eden.graph import Vectorizer as GraphVectorizer
from eden_rna import sequence_dotbracket_to_graph
from eden_rna.io.fasta import seq_to_networkx
from eden_rna import rnafold
import logging
logger = logging.getLogger(__name__)
def normalize_seq(seq_pair):
header, seq = seq_pair
header = header.split('\n')[0]
header = header.split('_')[0]
return (header, seq)
def normalize_seqs(seqs):
for seq in seqs:
yield normalize_seq(seq)
def convert_seq_to_fasta_str(seq_pair):
header, seq = normalize_seq(seq_pair)
return '>%s\n%s\n' % (header, seq)
def extract_aligned_seed(header, out):
text = out.strip().split('\n')
seed = ''
for line in text:
if header in line:
seed += line.strip().split()[1]
return seed
def extract_struct_energy(out):
text = out.strip().split('\n')
struct = text[1].strip().split()[0]
energy = text[1].strip().split()[1:]
energy = ' '.join(energy).replace('(', '').replace(')', '')
energy = energy.split('=')[0]
energy = float(energy)
return struct, energy
def make_seq_struct(seq, struct):
clean_seq = ''
clean_struct = ''
for seq_char, struct_char in zip(seq, struct):
if seq_char == '-' and struct_char == '.':
pass
else:
clean_seq += seq_char
clean_struct += struct_char
return clean_seq, clean_struct
class Vectorizer(object):
def __init__(self,
complexity=None,
nbits=20,
sequence_vectorizer_complexity=3,
graph_vectorizer_complexity=2,
n_neighbors=5,
sampling_prob=.5,
n_iter=5,
min_energy=-5,
random_state=1):
random.seed(random_state)
if complexity is not None:
sequence_vectorizer_complexity = complexity
graph_vectorizer_complexity = complexity
self.sequence_vectorizer = SeqVectorizer(complexity=sequence_vectorizer_complexity,
nbits=nbits,
normalization=False,
inner_normalization=False)
self.graph_vectorizer = GraphVectorizer(complexity=graph_vectorizer_complexity, nbits=nbits)
self.n_neighbors = n_neighbors
self.sampling_prob = sampling_prob
self.n_iter = n_iter
self.min_energy = min_energy
self.nearest_neighbors = NearestNeighbors(n_neighbors=n_neighbors)
def fit(self, seqs):
# store seqs
self.seqs = list(normalize_seqs(seqs))
data_matrix = self.sequence_vectorizer.transform(self.seqs)
# fit nearest_neighbors model
self.nearest_neighbors.fit(data_matrix)
return self
def fit_transform(self, seqs, sampling_prob=None, n_iter=None):
seqs, seqs_ = tee(seqs)
return self.fit(seqs_).transform(seqs, sampling_prob=sampling_prob, n_iter=n_iter)
def transform(self, seqs, sampling_prob=None, n_iter=None):
seqs = list(normalize_seqs(seqs))
graphs_ = self.graphs(seqs)
data_matrix = self.graph_vectorizer.transform(graphs_)
return data_matrix
def graphs(self, seqs, sampling_prob=None, n_iter=None):
seqs = list(normalize_seqs(seqs))
if n_iter is not None:
self.n_iter = n_iter
if sampling_prob is not None:
self.sampling_prob = sampling_prob
for seq, neighs in self._compute_neighbors(seqs):
if self.n_iter > 1:
header, sequence, struct, energy = self._optimize_struct(seq, neighs)
else:
header, sequence, struct, energy = self._align_sequence_structure(seq, neighs)
graph = self._seq_to_eden(header, sequence, struct, energy)
yield graph
def _optimize_struct(self, seq, neighs):
structs = []
results = []
for i in range(self.n_iter):
new_neighs = self._sample_neighbors(neighs)
header, sequence, struct, energy = self._align_sequence_structure(seq, new_neighs)
results.append((header, sequence, struct, energy))
structs.append(struct)
instance_id = self._most_representative(structs)
selected = results[instance_id]
return selected
def _most_representative(self, structs):
# compute kernel matrix with sequence_vectorizer
data_matrix = self.sequence_vectorizer.transform(structs)
kernel_matrix = pairwise_kernels(data_matrix, metric='rbf', gamma=1)
# compute instance density as 1 over average pairwise distance
density = np.sum(kernel_matrix, 0) / data_matrix.shape[0]
# compute list of nearest neighbors
max_id = np.argsort(-density)[0]
return max_id
def _sample_neighbors(self, neighs):
out_neighs = []
# insert one element at random
out_neighs.append(random.choice(neighs))
# add other elements sampling without replacement
for neigh in neighs:
if random.random() < self.sampling_prob:
out_neighs.append(neigh)
return out_neighs
def _align_sequence_structure(self, seq, neighs, structure_deletions=False):
header = seq[0]
        # if seq is also in neighs, RNAalifold will fail, so remove duplicates
        neighs = [(a, b) for a, b in neighs if b != seq[1]]
if len(neighs) < 1:
clean_seq, clean_struct = rnafold.rnafold_wrapper(seq[1])
energy = 0
logger.debug('Warning: no alignment for: %s' % seq)
else:
str_out = convert_seq_to_fasta_str(seq)
for neigh in neighs:
str_out += convert_seq_to_fasta_str(neigh)
cmd = 'echo "%s" | muscle -clwstrict -quiet' % (str_out)
out = sp.check_output(cmd, shell=True)
seed = extract_aligned_seed(header, out)
cmd = 'echo "%s" | RNAalifold --noPS 2>/dev/null' % (out)
out = sp.check_output(cmd, shell=True)
struct, energy = extract_struct_energy(out)
if energy > self.min_energy:
# use min free energy structure
clean_seq, clean_struct = rnafold.rnafold_wrapper(seq[1])
else:
clean_seq, clean_struct = make_seq_struct(seed, struct)
if structure_deletions:
clean_struct = self._clean_structure(clean_seq, clean_struct)
return header, clean_seq, clean_struct, energy
def _clean_structure(self, seq, stru):
'''
Parameters
----------
seq : basestring
rna sequence
stru : basestring
dotbracket string
Returns
-------
        The structure given may not respect deletions in the sequence;
        this returns the structure transformed into one that does.
'''
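        # Illustrative example: seq='A-CGU' with stru='((.))' first turns the
        # partner of the deleted '(' at index 1 into '.', yielding '((..)';
        # deleting index 1 gives '(..)', which the clean-up rules open to '....'.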
# find deletions in sequence
ids = []
for i, c in enumerate(seq):
if c == '-':
ids.append(i)
# remove brackets that dont have a partner anymore
stru = list(stru)
pairdict = self._pairs(stru)
for i in ids:
stru[pairdict[i]] = '.'
# delete deletions in structure
ids.reverse()
for i in ids:
del stru[i]
stru = ''.join(stru)
# removing obvious mistakes
stru = stru.replace("(())", "....")
stru = stru.replace("(.)", "...")
stru = stru.replace("(..)", "....")
return stru
def _pairs(self, struct):
'''
Parameters
----------
struct : basestring
Returns
-------
        dict mapping the index of each bracket in struct to the index of its partner bracket
'''
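        # Illustrative example: for struct '(())' this returns
        # {0: 3, 3: 0, 1: 2, 2: 1}, i.e. each bracket maps to its partner.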
unpaired = []
pairs = {}
for i, c in enumerate(struct):
if c == '(':
unpaired.append(i)
if c == ')':
partner = unpaired.pop()
pairs[i] = partner
pairs[partner] = i
return pairs
def _compute_neighbors(self, seqs):
seqs = list(seqs)
data_matrix = self.sequence_vectorizer.transform(seqs)
# find neighbors
distances, neighbors = self.nearest_neighbors.kneighbors(data_matrix)
# for each seq
for seq, neighs in zip(seqs, neighbors):
neighbor_seqs = [self.seqs[neigh] for neigh in neighs]
yield seq, neighbor_seqs
def _seq_to_eden(self, header, sequence, struct, energy):
graph = sequence_dotbracket_to_graph(seq_info=sequence, seq_struct=struct)
if graph.number_of_nodes() < 2:
graph = seq_to_networkx(header, sequence)
graph.graph['id'] = header
graph.graph['info'] = 'muscle+RNAalifold energy=%.3f' % (energy)
graph.graph['energy'] = energy
graph.graph['sequence'] = sequence
return graph
| mit |
Achuth17/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
    ----------
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
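            # e.g. for a 4x4 image reconstructed from 2x2 patches, a corner
            # pixel is covered by 1 patch while an interior pixel such as
            # (1, 1) is covered by min(2, 2, 3) * min(2, 2, 3) = 4 patches.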
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
| bsd-3-clause |
enigmampc/catalyst | catalyst/utils/pandas_utils.py | 2 | 7178 | """
Utilities for working with pandas objects.
"""
from contextlib import contextmanager
from copy import deepcopy
from itertools import product
import operator as op
import warnings
import pandas as pd
from distutils.version import StrictVersion
pandas_version = StrictVersion(pd.__version__)
def july_5th_holiday_observance(datetime_index):
return datetime_index[datetime_index.year != 2013]
def explode(df):
"""
Take a DataFrame and return a triple of
(df.index, df.columns, df.values)
"""
return df.index, df.columns, df.values
def _time_to_micros(time):
"""Convert a time into microseconds since midnight.
Parameters
----------
time : datetime.time
The time to convert.
Returns
-------
us : int
The number of microseconds since midnight.
Notes
-----
This does not account for leap seconds or daylight savings.
"""
seconds = time.hour * 60 * 60 + time.minute * 60 + time.second
return 1000000 * seconds + time.microsecond
_opmap = dict(zip(
product((True, False), repeat=3),
product((op.le, op.lt), (op.le, op.lt), (op.and_, op.or_)),
))
def mask_between_time(dts, start, end, include_start=True, include_end=True):
"""Return a mask of all of the datetimes in ``dts`` that are between
``start`` and ``end``.
Parameters
----------
dts : pd.DatetimeIndex
The index to mask.
start : time
Mask away times less than the start.
end : time
Mask away times greater than the end.
include_start : bool, optional
Inclusive on ``start``.
include_end : bool, optional
Inclusive on ``end``.
Returns
-------
mask : np.ndarray[bool]
A bool array masking ``dts``.
See Also
--------
:meth:`pandas.DatetimeIndex.indexer_between_time`
"""
# This function is adapted from
# `pandas.Datetime.Index.indexer_between_time` which was originally
# written by Wes McKinney, Chang She, and Grant Roch.
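    # Usage sketch: for dts = pd.date_range('2014-01-01', periods=24, freq='H')
    # and start/end of time(9, 31) and time(16, 0), the mask selects the seven
    # hourly stamps from 10:00 through 16:00 inclusive.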
time_micros = dts._get_time_micros()
start_micros = _time_to_micros(start)
end_micros = _time_to_micros(end)
left_op, right_op, join_op = _opmap[
bool(include_start),
bool(include_end),
start_micros <= end_micros,
]
return join_op(
left_op(start_micros, time_micros),
right_op(time_micros, end_micros),
)
def find_in_sorted_index(dts, dt):
"""
Find the index of ``dt`` in ``dts``.
This function should be used instead of `dts.get_loc(dt)` if the index is
large enough that we don't want to initialize a hash table in ``dts``. In
particular, this should always be used on minutely trading calendars.
Parameters
----------
dts : pd.DatetimeIndex
Index in which to look up ``dt``. **Must be sorted**.
dt : pd.Timestamp
``dt`` to be looked up.
Returns
-------
ix : int
Integer index such that dts[ix] == dt.
Raises
------
    LookupError
If dt is not in ``dts``.
"""
ix = dts.searchsorted(dt)
if dts[ix] != dt:
raise LookupError("{dt} is not in {dts}".format(dt=dt, dts=dts))
return ix
def nearest_unequal_elements(dts, dt):
"""
Find values in ``dts`` closest but not equal to ``dt``.
Returns a pair of (last_before, first_after).
When ``dt`` is less than any element in ``dts``, ``last_before`` is None.
When ``dt`` is greater any element in ``dts``, ``first_after`` is None.
``dts`` must be unique and sorted in increasing order.
Parameters
----------
dts : pd.DatetimeIndex
Dates in which to search.
dt : pd.Timestamp
Date for which to find bounds.
"""
if not dts.is_unique:
raise ValueError("dts must be unique")
if not dts.is_monotonic_increasing:
raise ValueError("dts must be sorted in increasing order")
if not len(dts):
return None, None
sortpos = dts.searchsorted(dt, side='left')
try:
sortval = dts[sortpos]
except IndexError:
# dt is greater than any value in the array.
return dts[-1], None
if dt < sortval:
lower_ix = sortpos - 1
upper_ix = sortpos
elif dt == sortval:
lower_ix = sortpos - 1
upper_ix = sortpos + 1
else:
lower_ix = sortpos
upper_ix = sortpos + 1
lower_value = dts[lower_ix] if lower_ix >= 0 else None
upper_value = dts[upper_ix] if upper_ix < len(dts) else None
return lower_value, upper_value
def timedelta_to_integral_seconds(delta):
"""
Convert a pd.Timedelta to a number of seconds as an int.
"""
return int(delta.total_seconds())
def timedelta_to_integral_minutes(delta):
"""
Convert a pd.Timedelta to a number of minutes as an int.
"""
return timedelta_to_integral_seconds(delta) // 60
@contextmanager
def ignore_pandas_nan_categorical_warning():
with warnings.catch_warnings():
        # Pandas >= 0.18 doesn't like null-ish values in categories, but
# avoiding that requires a broader change to how missing values are
# handled in pipeline, so for now just silence the warning.
warnings.filterwarnings(
'ignore',
category=FutureWarning,
)
yield
_INDEXER_NAMES = [
'_' + name for (name, _) in pd.core.indexing.get_indexers_list()
]
def clear_dataframe_indexer_caches(df):
"""
Clear cached attributes from a pandas DataFrame.
By default pandas memoizes indexers (`iloc`, `loc`, `ix`, etc.) objects on
DataFrames, resulting in refcycles that can lead to unexpectedly long-lived
DataFrames. This function attempts to clear those cycles by deleting the
cached indexers from the frame.
Parameters
----------
df : pd.DataFrame
"""
for attr in _INDEXER_NAMES:
try:
delattr(df, attr)
except AttributeError:
pass
def categorical_df_concat(df_list, inplace=False):
"""
Prepare list of pandas DataFrames to be used as input to pd.concat.
Ensure any columns of type 'category' have the same categories across each
dataframe.
Parameters
----------
df_list : list
List of dataframes with same columns.
inplace : bool
True if input list can be modified. Default is False.
Returns
-------
concatenated : df
Dataframe of concatenated list.
"""
if not inplace:
df_list = deepcopy(df_list)
# Assert each dataframe has the same columns/dtypes
df = df_list[0]
if not all([(df.dtypes.equals(df_i.dtypes)) for df_i in df_list[1:]]):
raise ValueError("Input DataFrames must have the same columns/dtypes.")
categorical_columns = df.columns[df.dtypes == 'category']
for col in categorical_columns:
new_categories = sorted(
set().union(
*(frame[col].cat.categories for frame in df_list)
)
)
with ignore_pandas_nan_categorical_warning():
for df in df_list:
df[col].cat.set_categories(new_categories, inplace=True)
return pd.concat(df_list)
| apache-2.0 |
google-research/tensorflow-coder | tf_coder/benchmarks/autopandas_benchmarks.py | 1 | 23124 | # Copyright 2021 The TF-Coder Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Benchmarks adapted from AutoPandas's benchmarks.
Autopandas benchmarks are at:
https://github.com/rbavishi/autopandas/blob/master/autopandas_v2/evaluation/benchmarks/stackoverflow.py
"""
# Avoid wrapping URLs and target programs to ease clicking and copying.
# pylint: disable=line-too-long
# Every function in this module takes no arguments and creates a benchmark.
# pylint: disable=missing-docstring,g-doc-return-or-yield
from tf_coder.benchmarks import benchmark
def autopandas_01():
# Turned the desired indices [0, 2, 4] into an input.
examples = [
benchmark.Example(
inputs=[
[[5, 6, 7, 8, 9], [10, 11, 12, 13, 14]],
[0, 2, 4],
],
output=[[5, 7, 9], [10, 12, 14]]
),
]
constants = []
description = '' # No description for AutoPandas benchmarks.
target_program = 'tf.gather(in1, in2, axis=1)'
source = 'SO_11881165_depth1'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_01')
def autopandas_02():
"""AutoPandas benchmark.
The DataFrame input:
value1 value2
group1 group2
a c 1.1 7.1
c 2.0 8.0
d 3.0 9.0
b d 4.0 10.0
d 5.0 11.0
e 6.0 12.0
The DataFrame output:
value1 value2
group2
c 1.1 7.1
c 2.0 8.0
d 3.0 9.0
Notice that "c", "d", and "e" are just treated as data, so we'll replace them
with 1, 2, and 3, respectively. "group1" acts as a third dimension, so our
input tensor will be 3D. We'll also make "group1" the innermost axis, to make
the problem not super trivial in TensorFlow.
Finally, I changed some numbers to rule out tf.reduce_min(axis=2).
"""
examples = [
benchmark.Example(
inputs=[
[[[1, 2], [1, 2], [2, 3]], # group2
[[1.1, 4], [2, 1], [3, 6]], # value1
[[7.1, 10], [8, 11], [9, 7]]], # value2
],
output=[[1, 1, 2],
[1.1, 2, 3],
[7.1, 8, 9]]
),
]
constants = []
description = '' # No description for AutoPandas benchmarks.
# Note that we don't have ops for indexing/slicing axis 2. But, TF-Coder will
# find a workaround.
target_program = 'in1[:, :, 0]'
source = 'SO_11941492_depth1'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_02')
def autopandas_03():
# The "series" and "step" columns only serve to identify where the data should
# move to. The number of unique "series" and "step" values determine the
# output's shape, so we provide them (3 and 5) as constants. The real data is
# in the "value" column, so it's a 1D tensor. This is simply a tensor reshape
# and transpose.
examples = [
benchmark.Example(
inputs=[
[1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010,
1011, 1012, 1013, 1014],
],
output=[[1000, 1003, 1006, 1009, 1012],
[1001, 1004, 1007, 1010, 1013],
[1002, 1005, 1008, 1011, 1014]]
),
]
constants = [3, 5]
description = '' # No description for AutoPandas benchmarks.
target_program = 'tf.transpose(tf.reshape(in1, (5, 3)))'
source = 'SO_13647222_depth1'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_03')
def autopandas_04():
# This problem boils down to "select rows of the table where row['line_race']
# is nonzero". We use a simpler example to describe the same problem idea.
examples = [
benchmark.Example(
inputs=[
[[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5],
[3, 0, 2, 5, 8],
[8, 8, 6, 3, 2],
[2, 0, 7, 7, 3],
[9, 0, 3, 2, 7],
[1, 3, 8, 9, 4]],
],
output=[[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5],
[8, 8, 6, 3, 2],
[1, 3, 8, 9, 4]],
),
]
constants = []
description = '' # No description for AutoPandas benchmarks.
target_program = 'tf.boolean_mask(in1, tf.cast(in1[:, 1], tf.bool))'
source = 'SO_18172851_depth1'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_04')
def autopandas_05():
# The problem is to sort a table using values in one column. Their code that
# constructs the output actually sorts by 3 columns, but their example has
# unique values for the first column, so the other two columns don't affect
# the result at all. Instead of having dates and strings and numbers, we just
# use numbers. Sort by column 0 in increasing order.
examples = [
benchmark.Example(
inputs=[
[[6, 3, 7, 8, 4],
[8, 9, 4, 5, 3],
[1, 5, 3, 6, 9],
[2, 1, 4, 3, 2],
[7, 9, 6, 2, 7],
[5, 8, 0, 4, 2]],
],
output=[[1, 5, 3, 6, 9],
[2, 1, 4, 3, 2],
[5, 8, 0, 4, 2],
[6, 3, 7, 8, 4],
[7, 9, 6, 2, 7],
[8, 9, 4, 5, 3]]
),
]
constants = []
description = '' # No description for AutoPandas benchmarks.
target_program = 'tf.gather(in1, tf.argsort(in1[:, 0], stable=True))'
source = 'SO_49583055_depth1'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_05')
# SO_49592930_depth1 is out of scope. It involves taking the union of two
# mappings from keys to values, where one such mapping takes precedence over the
# other in case of overlapping keys. TensorFlow is not designed to handle
# mappings, and I (Kensen) cannot think of a way to do this using TensorFlow.
# SO_49572546_depth1 is exactly the same as the one above, except for the
# ordering of two inputs and the data. It's out of scope for the same reason.
@benchmark.ignore("This isn't in AutoPandas's table of results, idk why")
def autopandas_06_ignored():
"""AutoPandas benchmark.
The input DataFrame:
X Y Z
4 X1 Y2 Z3
5 X1 Y1 Z1
6 X1 Y1 Z1
7 X1 Y1 Z2
The output DataFrame:
Z Z1 Z2 Z3
Y
Y1 1.0 1.0 NaN
Y2 NaN NaN 1.0
Basically, the Ys are row indices and the Zs are column indices. The X1 does
not really matter, we just want a boolean output table (1.0=True, NaN=False)
that identifies which Y and Z pairs appeared in the input table.
Their example is tiny so I expanded it.
This task also doesn't appear in their table of results?
"""
examples = [
benchmark.Example(
inputs=[
# Y Z
[[1, 2],
[0, 0],
[0, 0],
[0, 1],
[4, 2],
[4, 3],
[4, 2],
[2, 1]],
],
output=[[True, True, False, False],
[False, False, True, False],
[False, True, False, False],
[False, False, False, False],
[False, False, True, True]]
),
]
constants = []
description = '' # No description for AutoPandas benchmarks.
# TODO(kshi): Note that tf.scatter_nd is not yet supported by TF-Coder!!
target_program = 'tf.cast(tf.scatter_nd(in1, updates=tf.ones(8), shape=[5, 4]), tf.bool)'
source = 'SO_12860421_depth1'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_06_ignored')
def autopandas_06():
# This is very similar to autopandas_03, they're both pivot tables.
examples = [
benchmark.Example(
inputs=[
[4, 5, 6, 7],
],
output=[[4, 5], [6, 7]]
),
]
constants = [2]
description = '' # No description for AutoPandas benchmarks.
target_program = 'tf.reshape(in1, (2, 2))'
source = 'SO_13261175_depth1'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_06')
# SO_13793321_depth1 is out of scope. It's merging two tables, keeping the rows
# with matching values in a particular column. This isn't something TensorFlow
# was designed for.
def autopandas_07():
# This is exactly the same as autopandas_05 (sort rows based on a column), but
# with different data. Like in the other task, their code for creating the
# output actually sorts using multiple columns, but their data contains unique
# values. I'm just going to mash my keyboard again to get different data.
examples = [
benchmark.Example(
inputs=[
[[8, 5, 9, 3],
[3, 6, 6, 8],
[1, 2, 3, 4],
[7, 6, 5, 4],
[4, 7, 2, 6]]
],
output=[[1, 2, 3, 4],
[3, 6, 6, 8],
[4, 7, 2, 6],
[7, 6, 5, 4],
[8, 5, 9, 3]]
),
]
constants = []
description = '' # No description for AutoPandas benchmarks.
target_program = 'tf.gather(in1, tf.argsort(in1[:, 0], stable=True))'
source = 'SO_14085517_depth1'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_07')
def autopandas_08():
# I added 3 extra rows so that the solution isn't just slicing, and chose the
# numbers to test the boundary of the condition row[0] > 1. The boundary (1)
# is given as a constant.
examples = [
benchmark.Example(
inputs=[
[[5, 7], [6, 8], [-1, 9], [-2, 10], [2, 11], [1, 12], [3, -3]],
],
output=[[5, 7], [6, 8], [2, 11], [3, -3]]
),
]
constants = [1]
description = '' # No description for AutoPandas benchmarks.
target_program = 'tf.boolean_mask(in1, tf.greater(in1[:, 0], 1))'
source = 'SO_11418192_depth2'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_08')
# SO_49567723_depth2 is out of scope. It's another table merge.
# SO_49987108_depth2 is out of scope. It uses DataFrame.fillna(method='ffill'),
# which has no equivalent (afaik) in TensorFlow.
def autopandas_09():
# This is yet another sort. But this time, the data actually does require
# sorting by both columns! I have constructed the example to reflect this.
# Also, this problem (sort by 2 columns) is the same as stackoverflow_19.
examples = [
benchmark.Example(
inputs=[
[[8, 5, 9, 3],
[3, 6, 6, 8],
[7, 9, 3, 4],
[7, 6, 5, 4],
[3, 7, 2, 6]]
],
output=[[3, 6, 6, 8],
[3, 7, 2, 6],
[7, 6, 5, 4],
[7, 9, 3, 4],
[8, 5, 9, 3]]
),
]
constants = []
description = '' # No description for AutoPandas benchmarks.
target_program = 'tf.gather(tf.gather(in1, tf.argsort(in1[:, 1], stable=True)), tf.argsort(tf.gather(in1, tf.argsort(in1[:, 1], stable=True))[:, 0], stable=True))'
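  # The nested gathers above implement a lexicographic sort: a stable argsort
  # on the secondary column (index 1) followed by a stable argsort on the
  # primary column (index 0).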
source = 'SO_13261691_depth2'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_09')
# SO_13659881_depth2 is out of scope. It involves counting how many times each
# row appears, and then appending that count to the deduplicated rows.
# TensorFlow is not designed for deduplicating rows.
def autopandas_10():
# Drop NaN. NaN is given as a constant.
examples = [
benchmark.Example(
inputs=[
[float('nan'), 11, 12, float('nan'), 16, 18],
],
output=[11, 12, 16, 18]
),
]
constants = [float('nan')]
description = '' # No description for AutoPandas benchmarks.
# TODO(kshi): We don't support tf.math.is_nan() and tf.math.logical_not()!!
# Once again, TF-Coder is better than me. TF-Coder replaces
# `tf.math.logical_not(tf.math.is_nan(in1))` with `tf.equal(in1, in1)` because
# NaN is the only thing that's not equal to itself.
target_program = 'tf.cast(tf.boolean_mask(in1, tf.math.logical_not(tf.math.is_nan(in1))), tf.int32)'
source = 'SO_13807758_depth2'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_10')
# SO_34365578_depth2 is out of scope. It involves grouping values and doing a
# sum aggregation for each group. TensorFlow is not designed for grouping values
# like this.
# SO_10982266_depth3 is also out of scope, involving a grouping and then mean
# aggregation for each group.
def autopandas_11():
# Transpose and prepend the row index. I changed the data to not have such
# obvious patterns.
examples = [
benchmark.Example(
inputs=[
[[1, 4, 2, 7, 6], [20, 10, 50, 40, 30]],
],
output=[[0, 1, 20],
[1, 4, 10],
[2, 2, 50],
[3, 7, 40],
[4, 6, 30]]
),
]
constants = []
description = '' # No description for AutoPandas benchmarks.
target_program = 'tf.transpose(tf.concat((tf.expand_dims(tf.range(5), axis=0), in1), axis=0))'
source = 'SO_11811392_depth3'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_11')
def autopandas_12():
# For each pair, compute pair[1] / (pair[0] + pair[1]).
# I added some numbers to avoid in1[:, :, 1] == tf.reduce_max(in1, axis=-1).
examples = [
benchmark.Example(
inputs=[
[[[2, 8], [2, 6], [6, 2]],
[[0, 2], [1, 1], [2, 0]]],
],
output=[[0.8, 0.75, 0.25],
[1.0, 0.5, 0.0]]
),
]
constants = []
description = '' # No description for AutoPandas benchmarks.
target_program = 'tf.cast(tf.divide(in1[:, :, 1], tf.reduce_sum(in1, axis=2)), tf.float32)'
source = 'SO_49581206_depth3'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_12')
def autopandas_13():
# Select rows where row[1] is in some set. I'm matching their data where
# possible.
examples = [
benchmark.Example(
inputs=[
[[101, 0, 11, 0],
[102, 1, 12, 4],
[103, 2, 13, 2],
[104, 3, 14, 8],
[105, 4, 15, 4],
[106, 5, 16, 5],
[107, 6, 17, 4],
[108, 7, 18, 7],
[109, 8, 19, 7],
[110, 9, 20, 4]],
[4, 2, 6],
],
output=[[103, 2, 13, 2],
[105, 4, 15, 4],
[107, 6, 17, 4]]
),
]
constants = []
description = '' # No description for AutoPandas benchmarks.
target_program = 'tf.boolean_mask(in1, tf.reduce_any(tf.equal(in1[:, 1], tf.expand_dims(in2, axis=1)), axis=0))'
source = 'SO_12065885_depth3'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_13')
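# Illustrative sketch only (not one of the benchmarks): the broadcasted
# membership test that autopandas_13's solution relies on. Assumes TensorFlow
# 2.x is importable; the helper name is made up.
def _isin_via_broadcast_sketch():
  import tensorflow as tf  # assumption: TF is available in this environment
  values = tf.constant([0, 1, 2, 3, 4])  # e.g. the second column of in1
  allowed = tf.constant([4, 2, 6])       # the membership set (in2)
  # tf.equal broadcasts shape [3, 1] against [5] into a [3, 5] table; reducing
  # with "any" over axis 0 marks values that match at least one allowed entry.
  mask = tf.reduce_any(tf.equal(values, tf.expand_dims(allowed, axis=1)), axis=0)
  return mask  # [False, False, True, False, True]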
def autopandas_14():
# The intended solution is to do a merge, but the example actually has nothing
# to merge, so all that happens is an extra column is created filled with NaN.
# Why are there so many problems where the example completely underspecifies
# the intended transformation? Instead of ignoring the problem because it has
# a merge (out of scope), let's just try to append a NaN column.
examples = [
benchmark.Example(
inputs=[
[[1, 0, 1, 2],
[1, 1, 3, 4],
[2, 0, 1, 2],
[2, 1, 3, 4]],
# They have another input that is used to provide the column
# header for the new NaN-filled column. Tensors don't have column
# headers so this input is useless. Omit it, or else TF-Coder will
# be required to use it.
],
output=[[1, 0, 1, 2, float('nan')],
[1, 1, 3, 4, float('nan')],
[2, 0, 1, 2, float('nan')],
[2, 1, 3, 4, float('nan')]]
),
]
constants = [float('nan')]
description = '' # No description for AutoPandas benchmarks.
target_program = 'tf.concat((tf.cast(in1, tf.float32), tf.fill([4, 1], float("nan"))), axis=1)'
source = 'SO_13576164_depth3'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_14')
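# Illustrative sketch only (not one of the benchmarks): appending a NaN-filled
# column, the operation autopandas_14 settles on above. Assumes TensorFlow 2.x
# is importable; the helper name is made up.
def _append_nan_column_sketch():
  import tensorflow as tf  # assumption: TF is available in this environment
  rows = tf.constant([[1, 0, 1, 2],
                      [1, 1, 3, 4]], dtype=tf.float32)
  nan_col = tf.fill([2, 1], float('nan'))  # one NaN per row
  return tf.concat((rows, nan_col), axis=1)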
# SO_14023037_depth3 is out of scope. It uses DataFrame.fillna(method='bfill'),
# which has no equivalent (afaik) in TensorFlow.
def autopandas_15():
# The task is to group by everything except one numeric column, and then
# cumsum over that numeric column. Grouping is out-of-scope, and the groups
# essentially become row headers anyway. The only part of this that is doable
# in TensorFlow is the cumsum part. I changed the example because it was too
# simple.
examples = [
benchmark.Example(
inputs=[
[1, 1, 2, 1, 3, 2],
],
output=[1, 2, 4, 5, 8, 10]
),
]
constants = []
description = '' # No description for AutoPandas benchmarks.
target_program = 'tf.cumsum(in1)'
source = 'SO_53762029_depth3'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_15')
# SO_21982987_depth3 is out of scope, involving a grouping and then mean
# aggregation for each group.
def autopandas_16():
  # Take the mean of each column (reduce over rows), then drop column 0's mean.
examples = [
benchmark.Example(
inputs=[
[[0, 6, 0],
[3, 101, 14],
[0, 91, 6],
[5, 15, 0]],
],
output=[53.25, 5.00]
),
]
constants = []
description = '' # No description for AutoPandas benchmarks.
target_program = 'tf.reduce_mean(tf.cast(in1, tf.float32), axis=0)[1:]'
source = 'SO_39656670_depth3'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_16')
# SO_23321300_depth3 is out of scope, involving a grouping and then mean
# aggregation for each group.
# A template for easy copy/pasting. Copying an existing benchmark and replacing
# parts of it will lead to a state where the benchmark is half-correct, but not
# obviously so. Copy this template instead when creating new benchmarks.
"""
def autopandas_NUMBER():
examples = [
benchmark.Example(
inputs=[
INPUT_1,
INPUT_2,
],
output=OUTPUT
),
]
constants = [CONSTANTS]
description = '' # No description for AutoPandas benchmarks.
target_program = 'SOLUTION_PROGRAM'
source = 'PROBLEM_SOURCE'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='autopandas_NUMBER')
""" # pylint: disable=pointless-string-statement
| apache-2.0 |
sarahgrogan/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 5 | 11057 | try:
# Python 2 compat
reload
except NameError:
# Regular Python 3+ import
from importlib import reload
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
    # Test that the explained variance ratios (normalized eigenvalues) sum to 1
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
# Assure that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
    # Test probability estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis().fit(X6, y6, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_deprecated_lda_qda_deprecation():
def import_lda_module():
import sklearn.lda
# ensure that we trigger DeprecationWarning even if the sklearn.lda
# was loaded previously by another test.
reload(sklearn.lda)
return sklearn.lda
lda = assert_warns(DeprecationWarning, import_lda_module)
assert lda.LDA is LinearDiscriminantAnalysis
def import_qda_module():
import sklearn.qda
# ensure that we trigger DeprecationWarning even if the sklearn.qda
# was loaded previously by another test.
reload(sklearn.qda)
return sklearn.qda
qda = assert_warns(DeprecationWarning, import_qda_module)
assert qda.QDA is QuadraticDiscriminantAnalysis
| bsd-3-clause |
rvraghav93/scikit-learn | examples/model_selection/plot_randomized_search.py | 47 | 3287 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The resulting parameter settings are quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from scipy.stats import randint as sp_randint
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(results, n_top=3):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(2, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.cv_results_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [2, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.cv_results_['params'])))
report(grid_search.cv_results_)
| bsd-3-clause |
AnasGhrab/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them used for model fitting) and 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
0todd0000/rft1d | rft1d/examples/val_max_4_anova1_0d.py | 2 | 1863 |
from math import sqrt
import numpy as np
from scipy import stats
from matplotlib import pyplot
eps = np.finfo(float).eps
def here_anova1(Y, X, X0, Xi, X0i, df):
Y = np.matrix(Y).T
### estimate parameters:
b = Xi*Y
eij = Y - X*b
R = eij.T*eij
### reduced design:
b0 = X0i*Y
eij0 = Y - X0*b0
R0 = eij0.T*eij0
### compute F statistic:
F = ((np.diag(R0)-np.diag(R))/df[0]) / (np.diag(R+eps)/df[1])
return float(F)
def here_design_matrices(nResponses, nGroups):
nTotal = sum(nResponses)
X = np.zeros((nTotal,nGroups))
i0 = 0
for i,n in enumerate(nResponses):
X[i0:i0+n,i] = 1
i0 += n
X = np.matrix(X)
X0 = np.matrix(np.ones(nTotal)).T #reduced design matrix
Xi,X0i = np.linalg.pinv(X), np.linalg.pinv(X0) #pseudo-inverses
return X,X0,Xi,X0i
#(0) Set parameters:
np.random.seed(0)
nResponses = 6,8,5 #responses per group
nIterations = 5000
### derived parameters:
nGroups = len(nResponses)
nTotal = sum(nResponses)
df = nGroups-1, nTotal-nGroups
X,X0,Xi,X0i = here_design_matrices(nResponses, nGroups)
#(1) Generate random data and compute test statistic:
F = []
for i in range(nIterations):
y = np.random.randn(nTotal)
F.append( here_anova1(y, X, X0, Xi, X0i, df) )
F = np.asarray(F)
#(2) Survival functions:
heights = np.linspace(1, 10, 21)
sf = np.array( [ (F>h).mean() for h in heights] )
sfE = stats.f.sf(heights, df[0], df[1])
#(3) Plot results:
pyplot.close('all')
ax = pyplot.axes()
ax.plot(heights, sf, 'o', label='Simulated')
ax.plot(heights, sfE, '-', label='Theoretical')
ax.set_xlabel('$u$', size=20)
ax.set_ylabel('$P (F > u)$', size=20)
ax.legend()
ax.set_title('ANOVA validation (0D)', size=20)
pyplot.show()
| gpl-3.0 |
ssaeger/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes the data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
itahsieh/sparx-alpha | bin/sparx-validate-leiden.py | 1 | 23217 | #! /usr/bin/env python
# Validate non-LTE excitation calculation by solving the water
# radiative transfer benchmark problem presented at the RT workshop
# at Leiden in 2004. This problem considers the excitation of
# 2-level ortho-water in an isothermal, uniform gas cloud. Two
# dynamical cases are considered -- a static cloud, and an expanding cloud
# with a linear velocity gradient
# of 100 km/s/pc.
#
# This program does the necessary work to reproduce all the results
# written in Dr. David Neufeld's document for benchmarking non-LTE
# line radiative transfer codes.
#
# CAVEAT: Beware that the line width used in Neufeld's document
# is 1 km/s FWHM, and if a direct comparison is to be made, some
# adjustments must be applied first.
# Global filename prefix
NAME = "leiden"
# Some necessary imports
from os.path import exists
from math import sqrt, log
import sparx
from sparx import tasks, utils, inputs, physics, miriad, grid
Unit = physics.Units
Cnst = physics.Const
import pylab as pl
import numpy as np
# Setup plotting
import matplotlib.font_manager
LEGND_PROP = matplotlib.font_manager.FontProperties(size=8)
# Image type
IMG_TYP = "png"
class Cloud(object):
"""
    The uniform cloud model.
"""
# Cloud parameters
r = 0.1 * Unit.pc # [m] cloud radius
n_H2 = 1e4 * 1e6 # [m^-3] cloud density
T_k = 40.0 # [K] cloud kinetic temperature
# Distance from source [m]
dist = 2000.0 * Unit.pc
# Calculate projected beam radius [m]
fwhm = (10.0 * Unit.pc / dist) # rad
sigma = fwhm / (2.0 * sqrt(2.0 * log(2.0)))
R_beam = sigma * dist
# Channel number and width
chan_n = 100
chan_width = 0.05 # km/s
def __init__(self, molec):
# The model molecule should contain only two states
#self.mol = physics.Molecule("o-h2o_2lev")
self.mol = physics.Molecule(molec)
# Calculate transition frequency and wavelength
self.freq = self.mol.line_freq[0] # Hz
self.lmda = Cnst.c / self.freq # m
return
################################################################################
class Static(Cloud):
"""
Analytic solution for the static cloud:
Since there is no accurate analytic solution for the excitation of a
static cloud with arbitrary molecular abundance, only two limiting cases
are solved:
a) the optcially thick LTE limit
b) the optically thin case
Formulating detailed balance in terms of photon escape probability,
we may arrive at the following solution for the ratio of the upper
and lower level populations
n_u / n_l = u / (1 + s * beta)
where u = (g_u / g_l) * exp(-(E_u - E_l) / (k * T_k))
s = n_cr / n_H2
beta = escape probability
"""
# Filename prefix
name = NAME+"-static"
def __init__(self, ndiv, Xmol_list, molec, tcmb="0K", s1d_path=None, s3d_path=None, r1d_path=None, fwhm=0.32e3):
"""
Initialize static problem: the only free parameter is
the fwhm of line width.
"""
# Initialize parent class
Cloud.__init__(self, molec)
# Number of divisions along radial direction
self.ndiv = ndiv
# List of molecular abundances
self.Xmol_list = Xmol_list
# Background radiation
self.tcmb = tcmb
# Paths to calculation results
self.s1d_path = s1d_path
self.s3d_path = s3d_path
self.r1d_path = r1d_path
# Setup file names
if s1d_path != None:
name = s1d_path+"/"+"%s-s1d"%(self.name)
self.s1d_srcs = [name+"-X=%.2e.src"%Xmol for Xmol in Xmol_list]
self.s1d_pops = [name+"-X=%.2e.pop"%Xmol for Xmol in Xmol_list]
self.s1d_imgs = [name+"-X=%.2e.img"%Xmol for Xmol in Xmol_list]
self.s1d_cnvs = [name+"-X=%.2e.cnv"%Xmol for Xmol in Xmol_list]
self.s1d_tmbs = [name+"-X=%.2e.tmb"%Xmol for Xmol in Xmol_list]
if s3d_path != None:
name = s3d_path+"/"+"%s-s3d"%(self.name)
self.s3d_srcs = [name+"-X=%.2e.src"%Xmol for Xmol in Xmol_list]
self.s3d_pops = [name+"-X=%.2e.pop"%Xmol for Xmol in Xmol_list]
self.s3d_imgs = [name+"-X=%.2e.img"%Xmol for Xmol in Xmol_list]
self.s3d_cnvs = [name+"-X=%.2e.cnv"%Xmol for Xmol in Xmol_list]
self.s3d_tmbs = [name+"-X=%.2e.tmb"%Xmol for Xmol in Xmol_list]
# Free parameters:
# Convert line width from FWHM to sqrt(2) * sigma:
# FWHM = 2 * sqrt(2 * log(2)) * sigma
self.width = fwhm / (2.0 * sqrt(log(2.0))) # m/s
self.nu_D = physics.Doppler_vel2frq(self.freq, self.width) - self.freq # Hz
# Excitation parameters:
# The 'u' parameter for excitation: this should be 0.512 for the H2O model
self.exc_u = self.mol.get_boltzmann_ratio(0, self.T_k)
# The 's' parameter for excitation:
# This should be 1588 for the H2O model
self.exc_s = self.mol.col[0].get_crit_dens(0, self.T_k) / self.n_H2
# Level populations:
# Upper level fractional density at optically thick condition (beta = 0)
# n_u / n_l = u / (1 + s * beta)
# => n_u / (1 - n_u) = u / 1
# => n_u = u / (u + 1)
# This should be 0.339 for the H2O model
self.n_u_thick = self.exc_u / (self.exc_u + 1.0)
# Lower level fractional density at optically thin condition (beta = 1)
# n_u / n_l = u / (1 + s * beta)
# => n_u / n_l = u / (1 + s)
# This should be 3.22e-4 for the H2O model
self.n_u_thin = 1.0 / (1.0 + (1.0 + self.exc_s) / self.exc_u)
return
def phi_nu(self, nu):
"""
Line profile function
"""
        return physics.gaussian_fprofile(nu, self.freq, self.nu_D)
def calc_luminosity(self, X_mol):
"""
Calculate theoretical luminosity assuming collisional de-excitation
can be neglected.
L = h * nu * q_12 * n_H2 * n_H2O * (4/3 * pi * r**3)
"""
return Cnst.h * self.freq * self.mol.col[0].get_up_rate(0, self.T_k) * self.n_H2 * (X_mol * self.n_H2) * (Cnst.pi * 4.0 / 3.0) * self.r**3.0
def _pipeline_sparx(self, Xmol, src, pop, img, cnv, tmb, genimg=True):
"""
Pipeline for generating SPARX solutions
"""
# Calculate excitation
if not exists(pop):
#tasks.task_amc(source=src, molec='o-h2o_2lev', out=pop)
utils.call("mpirun -np %d sparx --parallel run task_amc source='%s' molec='%s' out='%s' fixiter=10 lte=%s trace=%s nrays=%d snr=%.0f tolerance=%.2e maxiter=%d"%(NPROC, src, self.mol.name, pop, FROMLTE, TRACE, NRAYS, SNR, TOLERANCE, MAXITER))
#utils.call("sparx run task_amc source='%s' molec='o-h2o_2lev' out='%s'"%(src, pop))
if genimg:
# Generate synthesized map
if not exists(img):
tasks.task_lineobs(
source=pop,
chan="[%d,'%gkms^-1']"%(self.chan_n, self.chan_width),
dist="%gpc"%(self.dist / Unit.pc),
line=0,
out=img,
npix="[256,256]",
cell="['0.5asec','0.5asec']",
unit="JY/PIXEL")
# To avoid mysterious 'invalid argument' bug in miriad
from time import sleep
sleep(0.5)
# Convolve with beam
if not exists(cnv):
miriad.convol(img, cnv, self.fwhm)
# To avoid mysterious 'invalid argument' bug in miriad
from time import sleep
sleep(0.5)
# Convert to brightness temperature
if not exists(tmb):
miriad.convert_flux_to_tb(cnv, tmb, self.freq, self.fwhm)
return
def _pipeline_s1d(self, iX, vgrad, genimg=True):
"""
SPARX-1D pipeline
"""
# Setup filenames
src = self.s1d_srcs[iX]
pop = self.s1d_pops[iX]
img = self.s1d_imgs[iX]
cnv = self.s1d_cnvs[iX]
tmb = self.s1d_tmbs[iX]
Xmol = self.Xmol_list[iX]
# Reset inputs (safer)
inputs.reset_inputs()
# Generate model grid
if not exists(src):
if self.ndiv == None:
tasks.task_leiden1d(out=src, xmol=Xmol, vgrad=vgrad, tk=opts.tk, tcmb=self.tcmb)
else:
tasks.task_leiden1d(out=src, xmol=Xmol, vgrad=vgrad, tk=opts.tk, tcmb=self.tcmb, ndiv=self.ndiv)
self._pipeline_sparx(Xmol, src, pop, img, cnv, tmb, genimg)
return
def _pipeline_s3d(self, iX, vgrad, genimg=True):
"""
SPARX-3D pipeline
"""
# Setup filenames
src = self.s3d_srcs[iX]
pop = self.s3d_pops[iX]
img = self.s3d_imgs[iX]
cnv = self.s3d_cnvs[iX]
tmb = self.s3d_tmbs[iX]
Xmol = self.Xmol_list[iX]
# Reset inputs (safer)
inputs.reset_inputs()
# Generate model grid
if not exists(src):
if self.ndiv == None:
ndiv = 16
else:
ndiv = self.ndiv * 2
tasks.task_leiden3d(out=src, xmol=Xmol, vgrad=vgrad, tk=opts.tk, tcmb=self.tcmb, ndiv=ndiv)
self._pipeline_sparx(Xmol, src, pop, img, cnv, tmb, genimg)
return
def plot_figure1(self, iX_range=None):
"""
Figure 1: shows the upper level population calculated with SPARX as a
function of radius, for different molecular abundances
"""
# Fall back to full range if iX_range not given
if iX_range == None:
iX_range = range(len(self.Xmol_list))
# Clear current figure
pl.cla()
for i in iX_range:
Xmol = self.Xmol_list[i]
if self.s1d_path :
# Get and plot S1D results
h5f = grid.SPARXH5(self.s1d_pops[i])
xlist = h5f.GetRadii()
ylist = h5f.GetRadial("lev1")
h5f.Close()
pl.plot(xlist, ylist, "-o", label="X(H2O)=%.2e (S1D)"%Xmol)
if self.s3d_path :
# Get and plot S3D results
h5f = grid.SPARXH5(self.s3d_pops[i])
xlist = h5f.GetRadii()
ylist = h5f.GetRadial("lev1")
h5f.Close()
pl.plot(xlist, ylist, "-^", label="X(H2O)=%.2e (S3D)"%Xmol)
# Plot analytic solution
pl.axhline(self.n_u_thick, color="r", ls=":", label="LTE limit")
pl.axhline(self.n_u_thin, color="b", ls=":", label="Optically thin limit")
# Setup plot
pl.xscale("log")
pl.yscale("log")
pl.xlabel("Radius [pc]")
pl.ylabel("Upper level fractional density")
pl.legend(loc="best", prop=LEGND_PROP)
pl.xlim((0, self.r / Unit.pc)) # pc
# Save plot
pl.savefig(self.name+"-fig1."+IMG_TYP)
return
def plot_figure2(self, iX_range=None):
"""
Figure 2: shows the upper level population at the center of the cloud
calculated with SPARX as a function of abundance
"""
# Fall back to full range if iX_range not given
if iX_range == None:
iX_range = range(len(self.Xmol_list))
# Clear figure
pl.cla()
# Allocate arrays for plotting
Xmol_arr = np.array(self.Xmol_list)
if self.s1d_path :
n_arr_s1d = np.zeros(shape=(len(self.Xmol_list)))
if self.s3d_path :
n_arr_s3d = np.zeros(shape=(len(self.Xmol_list)))
# Loop through abundance list and gather n_u at
# central zone
for i in iX_range:
if self.s1d_path :
# Get S1D results
h5f = grid.SPARXH5(self.s1d_pops[i])
levarr = h5f.GetRadial("lev1")
n_arr_s1d[i] = levarr[0]
h5f.Close()
if self.s3d_path :
# Get S3D results
h5f = grid.SPARXH5(self.s3d_pops[i])
levarr = h5f.GetRadial("lev1")
n_arr_s3d[i] = levarr[0]
h5f.Close()
# Plot analytic solution
pl.axhline(self.n_u_thick, color="r", ls=":", label="LTE limit")
pl.axhline(self.n_u_thin, color="b", ls=":", label="Optically thin limit")
# Plot SPARX solutions
if self.s1d_path:
pl.plot(Xmol_arr, n_arr_s1d, "c-o", label="S1D")
if self.s3d_path:
pl.plot(Xmol_arr, n_arr_s3d, "m-^", label="S3D")
# Setup plot
pl.xscale("log")
pl.yscale("log")
pl.xlabel("Molecular abundance")
pl.ylabel("Upper level fractional density")
pl.legend(loc="best", prop=LEGND_PROP)
pl.xlim((Xmol_arr[0], Xmol_arr[-1]))
# Save plot
pl.savefig(self.name+"-fig2."+IMG_TYP)
return
def plot_figure3(self, iX_range=None):
"""
Figure 3: shows the emergent spectrum (T_A vs. v) for a Gaussian beam of
projected size 10pc (FWHM)
"""
# Fall back to full range if iX_range not given
if iX_range == None:
iX_range = range(len(self.Xmol_list))
# Clear figure
pl.cla()
# Loop through abundance list
for i in iX_range:
Xmol = self.Xmol_list[i]
if self.s1d_path :
# Get and plot S1D results
tmb = miriad.MirXYV(self.s1d_tmbs[i])
velo = tmb.v_list / 1e3 # [m/s] -> [km/s]
spec = tmb.GetSpecOffASec(0, 0) # [K]
pl.plot(velo, spec, ":", label="X(H2O)=%.2e (S1D)"%Xmol)
if self.s3d_path :
# Get and plot S3D results
tmb = miriad.MirXYV(self.s3d_tmbs[i])
velo = tmb.v_list / 1e3 # [m/s] -> [km/s]
spec = tmb.GetSpecOffASec(0, 0) # [K]
pl.plot(velo, spec, "-.", label="X(H2O)=%.2e (S3D)"%Xmol)
# Setup plot
pl.yscale("log")
pl.xlabel("Projected velocity (km/s)")
pl.ylabel("Antenna temperature (K)")
pl.legend(loc="best", prop=LEGND_PROP)
pl.xlim(-1.5, 1.5)
#pl.ylim(1e-7, 1e-2)
# Save plot
pl.savefig(self.name+'-fig3.'+IMG_TYP)
return
def plot_figure4(self, iX_range=None):
"""
Figure 4: shows the total line luminosity as a function of the molecular
abundance
"""
# Fall back to full range if iX_range not given
if iX_range == None:
iX_range = range(len(self.Xmol_list))
# Clear figure
pl.cla()
# Setup data arrays
L_list_theory = []
L_list_s1d = []
L_list_s3d = []
# Loop through abundance list
for i in iX_range:
Xmol = self.Xmol_list[i]
# Calculate theoretical luminosity limit
L_list_theory += [self.calc_luminosity(Xmol) * 1e7] # [J s^-1] -> [erg s^-1]
if self.s1d_path:
cnv = miriad.MirXYV(self.s1d_cnvs[i])
F_nu = cnv.GetSpecOffASec(0, 0) # [Jy/Beam]
F_line = sum(F_nu) * 1e-26 # [J/m^2]
L_line = F_line * 4.0 * Cnst.pi * self.R_beam**2 * 1e7 # [J s^-1] -> [erg s^-1]
L_list_s1d += [L_line]
if self.s3d_path:
cnv = miriad.MirXYV(self.s3d_cnvs[i])
F_nu = cnv.GetSpecOffASec(0, 0) # [Jy/Beam]
F_line = sum(F_nu) * 1e-26 # [J/m^2]
L_line = F_line * 4.0 * Cnst.pi * self.R_beam**2 * 1e7 # [J s^-1] -> [erg s^-1]
L_list_s3d += [L_line]
# Plot all solutions
Xmol_list = [self.Xmol_list[i] for i in iX_range]
pl.plot(Xmol_list, L_list_theory, ":", label="Theoretical limit")
if len(L_list_s1d):
pl.plot(Xmol_list, L_list_s1d, "o", label="S1D")
if len(L_list_s3d):
pl.plot(Xmol_list, L_list_s3d, "^", label="S3D")
# Setup plot
pl.xscale("log")
pl.yscale("log")
pl.xlabel("Molecular fractional abundance")
pl.ylabel("Line luminosity (erg/s)")
pl.legend(loc="best", prop=LEGND_PROP)
pl.xlim(min(Xmol_list), max(Xmol_list))
# pl.ylim(1e26, 1e32)
# Save plot
pl.savefig(self.name+'-fig4.'+IMG_TYP)
return
def run(self, exc_only=False, nofig=False, no_intermediate=False):
"""
Run the benchmark problems
"""
# Don't generate images if excitation only is requested
genimg = not exc_only
# Calculate static problem for all abundances for all pipelines
for i in range(len(self.Xmol_list)):
print "Running static problem for Xmol=%g..."%self.Xmol_list[i]
# S1D problem
if self.s1d_path != None:
self._pipeline_s1d(i, '0kms^-1', genimg)
# S3D problem
if self.s3d_path != None:
self._pipeline_s3d(i, '0kms^-1', genimg)
# Plot intermediate results
if not nofig and not no_intermediate:
print "Generating intermediate figures..."
self.plot_figure1(range(0,i+1))
self.plot_figure2(range(0,i+1))
if genimg:
self.plot_figure3(range(0,i+1))
self.plot_figure4(range(0,i+1))
if not nofig:
print "Generating final figures..."
self.plot_figure1(range(0,i+1))
self.plot_figure2(range(0,i+1))
if genimg:
self.plot_figure3(range(0,i+1))
self.plot_figure4(range(0,i+1))
print "Static problem completed"
print
return
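# A small standalone check (not part of the original script) of the two-level
# limits quoted in Static.__init__: with the documented values u ~= 0.512 and
# s ~= 1588 for this o-H2O model, the LTE limit is ~0.339 and the optically
# thin limit is ~3.22e-4. Pure arithmetic, no SPARX calls involved.
def check_two_level_limits(u=0.512, s=1588.0):
    """Return (n_u_thick, n_u_thin) for a two-level system (sketch only)."""
    n_u_thick = u / (u + 1.0)                # beta = 0: optically thick / LTE
    n_u_thin = 1.0 / (1.0 + (1.0 + s) / u)   # beta = 1: optically thin
    return n_u_thick, n_u_thin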
################################################################################
class LVG(Static):
"""
Analytic solution for the expanding cloud:
The expanding cloud problem is solved using Sobolev's method,
which gives analytical solutions for the level populations.
"""
def __init__(self, ndiv, Xmol_list, molec, alpha=100.0e3 / Unit.pc, fwhm=0.32e3, **kwargs):
self.alpha = alpha
self.name = NAME+"-lvg"
Static.__init__(self, ndiv, Xmol_list, molec, fwhm=fwhm, **kwargs)
return
def exc_t(self, Xmol):
"""
Calculate the `t' optical depth parameter for the LVG problem.
alpha = v / r (velocity gradient)
"""
return (1.0 / (8.0 * Cnst.pi * self.alpha)) * (Cnst.c**3.0 / self.freq**3.0) * self.mol.line_Aul[0] * self.n_H2 * Xmol
def n_u_hightau(self, x_mol):
"""
Calculate upper level fractional population for
high-optical depth conditions
"""
from math import sqrt
t = self.exc_t(x_mol)
u = self.exc_u
s = self.exc_s
return (t * (3.0 * u + 1.0) + s - sqrt((1.0 - u)**2.0 * t**2.0 + 2.0 * t * s * (3.0 * u + 1.0) + s**2.0)) / (4.0 * t * (u + 1.0))
def n_u_lowtau(self, x_mol):
"""
Calculate upper level fractional population for
low-optical depth conditions
"""
from math import exp
t = self.exc_t(x_mol)
u = self.exc_u
s = self.exc_s
return u * t / (s * (1.0 - exp(-t)))
def n_u_analytic(self, X_mol):
if X_mol <= 5.591e-8:
return self.n_u_lowtau(X_mol)
else:
return self.n_u_hightau(X_mol)
def plot_figure2(self, iX_range=None):
"""
Figure 2: shows the upper level population at the center of the cloud
calculated with SPARX as a function of abundance
"""
# Fall back to full range if iX_range not given
if iX_range == None:
iX_range = range(len(self.Xmol_list))
# Clear figure
pl.cla()
# Allocate arrays for plotting
Xmol_arr = np.array(self.Xmol_list)
if self.s1d_path :
n_arr_s1d = np.zeros(shape=(len(self.Xmol_list)))
if self.s3d_path :
n_arr_s3d = np.zeros(shape=(len(self.Xmol_list)))
# Loop through abundance list and gather n_u at
# central zone
for i in iX_range:
if self.s1d_path :
# Get S1D results
h5f = grid.SPARXH5(self.s1d_pops[i])
levarr = h5f.GetRadial("lev1")
n_arr_s1d[i] = levarr[0]
h5f.Close()
if self.s3d_path :
# Get S3D results
h5f = grid.SPARXH5(self.s3d_pops[i])
levarr = h5f.GetRadial("lev1")
n_arr_s3d[i] = levarr[0]
h5f.Close()
# Plot analytic solutions
pl.axhline(self.n_u_thick, color="r", ls=":", label="LTE limit")
pl.axhline(self.n_u_thin, color="b", ls=":", label="Optically thin limit")
xarr = utils.generate_log_points(Xmol_arr[0], Xmol_arr[-1], 100)
pl.plot(xarr, [self.n_u_analytic(Xmol) for Xmol in xarr], color="g", ls="-.", label="Sobolev approximation")
# Plot SPARX solutions
if self.s1d_path:
pl.plot(Xmol_arr, n_arr_s1d, "c-o", label="S1D")
if self.s3d_path:
pl.plot(Xmol_arr, n_arr_s3d, "m-^", label="S3D")
# Setup plot
pl.xscale("log")
pl.yscale("log")
pl.xlabel("Molecular abundance")
pl.ylabel("Upper level fractional density")
pl.legend(loc="best", prop=LEGND_PROP)
pl.xlim((Xmol_arr[0], Xmol_arr[-1]))
# Save plot
pl.savefig(self.name+"-fig2."+IMG_TYP)
return
def run(self, vgrad="100kms^-1", nofig=False, no_intermediate=False):
"""
Run the benchmark problems
"""
# Calculate static problem for all abundances for all pipelines
for i in range(len(self.Xmol_list)):
print "Running LVG problem for Xmol=%g..."%self.Xmol_list[i]
# S1D problem
if self.s1d_path != None:
self._pipeline_s1d(i, vgrad, genimg=False)
# S3D problem
if self.s3d_path != None:
self._pipeline_s3d(i, vgrad, genimg=False)
# Plot intermediate results
if not nofig and not no_intermediate:
print "Generating intermediate figures..."
self.plot_figure1(range(0,i+1))
self.plot_figure2(range(0,i+1))
if not nofig:
print "Generating final figures..."
self.plot_figure1(range(len(self.Xmol_list)))
self.plot_figure2(range(len(self.Xmol_list)))
print "LVG problem completed"
print
return
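# A quick consistency sketch (not called anywhere in this script): for t -> 0
# the Sobolev expression u*t / (s*(1 - exp(-t))) tends to u/s, which agrees
# with the static optically thin limit u/(1 + u + s) whenever s >> 1 + u
# (s ~= 1588 here), so the two treatments match in the low-opacity regime.
def check_sobolev_thin_limit(u=0.512, s=1588.0, t=1e-6):
    from math import exp
    n_u_lowtau = u * t / (s * (1.0 - exp(-t)))
    n_u_thin = 1.0 / (1.0 + (1.0 + s) / u)
    return n_u_lowtau, n_u_thin  # nearly equal for small t and large s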
################################################################################
##
## Main
##
if __name__ == "__main__":
##
## Parse inputs
##
# Init option parser
from optparse import OptionParser
parser = OptionParser()
# Setup options
parser.add_option("--ndiv", metavar="POSINT", dest="ndiv", nargs=1, default="8", help="Number of zones along radial direction")
parser.add_option("--xmol", metavar="XLIST", dest="xmol", nargs=1, default="1e-10,1e-9,1e-8,1e-7,1e-6", help="List of abundances to calcualte")
parser.add_option("--tk", metavar="TK", dest="tk", nargs=1, default="40K", help="Kinetic temperature")
parser.add_option("--vgrad", metavar="VELGRAD", dest="vgrad", nargs=1, default="100kms^-1", help="Velocity gradient of LVG problem")
parser.add_option("--tcmb", metavar="TCMB", dest="tcmb", nargs=1, default="0K", help="Brightness temperature of background radiation")
parser.add_option("--snr", metavar="SNR", dest="snr", nargs=1, default="20", help="Final Monte Carlo S/N ratio")
parser.add_option("--tolerance", metavar="TOLERANCE", dest="tolerance", nargs=1, default="1e-9", help="Convergence criterion for fixed rays stage")
parser.add_option("--nrays", metavar="NRAYS", dest="nrays", nargs=1, default="1000", help="Number of initial rays")
parser.add_option("--maxiter", metavar="MAXITER", dest="maxiter", nargs=1, default="1000", help="Maximum number of iterations for converging Jbar and n")
parser.add_option("--molec", metavar="MOLEC", dest="molec", nargs=1, default="o-h2o_2lev", help="Molecule used in the benchmark")
parser.add_option("--clear", dest="clear", action="store_true", default=False, help="Whether to remove old files")
parser.add_option("--trace", dest="trace", action="store_true", default=False, help="Whether to trace convergence history")
parser.add_option("--from-lte", dest="from_lte", action="store_true", default=False, help="Start convergence from LTE conditions")
parser.add_option("--static-only", dest="static_only", action="store_true", default=False, help="Do static problem only")
parser.add_option("--lvg-only", dest="lvg_only", action="store_true", default=False, help="Do LVG problem only")
parser.add_option("--exc-only", dest="exc_only", action="store_true", default=False, help="Calculate excitation only")
parser.add_option("--nofig", dest="nofig", action="store_true", default=False, help="Do not plot figures")
parser.add_option("--no-intermediate", dest="no_intermediate", action="store_true", default=False, help="Do not plot intermediate figures")
parser.add_option("--orig", dest="orig", action="store_true", default=False, help="Use original problem description")
parser.add_option("--1d-only", dest="only1d", action="store_true", default=False, help="Do 1D problem only")
parser.add_option("--np", metavar="NPROC", dest="nproc", nargs=1, default="1", help="Number of parallel processes")
# The actual parsing
(opts, args) = parser.parse_args()
# The list of molecular abundances to be considered
#Xmol_LIST = [1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5][:]
Xmol_LIST = [float(i) for i in opts.xmol.split(",")]
for i in Xmol_LIST:
if i <= 0:
raise Exception, "xmol must be > 0"
# SNR
SNR = float(opts.snr)
# NRAYS
NRAYS = int(opts.nrays)
# FROMLTE
FROMLTE = opts.from_lte
# TRACE
TRACE = opts.trace
# TOLERANCE
TOLERANCE = float(opts.tolerance)
# NPROC
NPROC = int(opts.nproc)
# MAXITER
MAXITER = int(opts.maxiter)
# NDIV
if opts.orig:
NDIV = None
else:
NDIV = int(opts.ndiv)
# Clear old files?
if opts.clear:
from glob import glob
utils.confirm_remove_files(glob("./%s*"%NAME))
if opts.only1d:
s3d_path = None
else:
s3d_path = "."
##
## Run validation
##
# Calculate static problem
if not opts.lvg_only:
static = Static(NDIV, Xmol_LIST, opts.molec, tcmb=opts.tcmb, s1d_path=".", s3d_path=s3d_path)
static.run(opts.exc_only, opts.nofig, opts.no_intermediate)
# Calculate LVG problem
if not opts.static_only:
lvg = LVG(NDIV, Xmol_LIST, opts.molec, tcmb=opts.tcmb, s1d_path=".", s3d_path=s3d_path)
lvg.run(opts.vgrad, opts.nofig, opts.no_intermediate)
| gpl-3.0 |
datapythonista/pandas | pandas/tests/series/test_arithmetic.py | 1 | 31525 | from datetime import timedelta
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import IncompatibleFrequency
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
)
import pandas as pd
from pandas import (
Categorical,
Index,
IntervalIndex,
Series,
Timedelta,
bdate_range,
date_range,
isna,
)
import pandas._testing as tm
from pandas.core import (
nanops,
ops,
)
from pandas.core.computation import expressions as expr
@pytest.fixture(
autouse=True, scope="module", params=[0, 1000000], ids=["numexpr", "python"]
)
def switch_numexpr_min_elements(request):
_MIN_ELEMENTS = expr._MIN_ELEMENTS
expr._MIN_ELEMENTS = request.param
yield request.param
expr._MIN_ELEMENTS = _MIN_ELEMENTS
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic:
@pytest.mark.parametrize(
"ts",
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
],
)
@pytest.mark.parametrize(
"opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
)
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename("ts")
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
op = getattr(Series, opname)
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators):
# GH 13208
class MySeries(Series):
_metadata = ["x"]
@property
def _constructor(self):
return MySeries
opname = all_arithmetic_operators
op = getattr(Series, opname)
m = MySeries([1, 2, 3], name="test")
m.x = 42
result = op(m, 1)
assert result.x == 42
def test_flex_add_scalar_fill_value(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, "r" + op)
# bind op at definition time...
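        # (Without the op=op default, the lambda would look up `op` when it is
        # *called*, long after this loop has finished, so every reverse entry
        # in `pairings` would silently reuse the last operator; capturing the
        # current value as a default argument freezes it per iteration.)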
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
@pytest.mark.parametrize("op, equiv_op, fv", pairings)
def test_operators_combine(self, op, equiv_op, fv):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isna(a)
bmask = isna(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all="ignore"):
if amask[i]:
if bmask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
tm.assert_series_equal(result, expected)
a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5))
b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6))
result = op(a, b)
exp = equiv_op(a, b)
tm.assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
class TestSeriesArithmetic:
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_add_series_with_period_index(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.iloc[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
msg = "Input has different freq=D from Period\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
ts + ts.asfreq("D", how="end")
@pytest.mark.parametrize(
"target_add,input_value,expected_value",
[
("!", ["hello", "world"], ["hello!", "world!"]),
("m", ["hello", "world"], ["hellom", "worldm"]),
],
)
def test_string_addition(self, target_add, input_value, expected_value):
# GH28658 - ensure adding 'm' does not raise an error
a = Series(input_value)
result = a + target_add
expected = Series(expected_value)
tm.assert_series_equal(result, expected)
def test_divmod(self):
# GH#25557
a = Series([1, 1, 1, np.nan], index=["a", "b", "c", "d"])
b = Series([2, np.nan, 1, np.nan], index=["a", "b", "d", "e"])
result = a.divmod(b)
expected = divmod(a, b)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
result = a.rdivmod(b)
expected = divmod(b, a)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
@pytest.mark.parametrize("index", [None, range(9)])
def test_series_integer_mod(self, index):
# GH#24396
s1 = Series(range(1, 10))
s2 = Series("foo", index=index)
msg = "not all arguments converted during string formatting"
with pytest.raises(TypeError, match=msg):
s2 % s1
def test_add_with_duplicate_index(self):
# GH14227
s1 = Series([1, 2], index=[1, 1])
s2 = Series([10, 10], index=[1, 2])
result = s1 + s2
expected = Series([11, 12, np.nan], index=[1, 1, 2])
tm.assert_series_equal(result, expected)
def test_add_na_handling(self):
from datetime import date
from decimal import Decimal
s = Series(
[Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)]
)
result = s + s.shift(1)
result2 = s.shift(1) + s
assert isna(result[0])
assert isna(result2[0])
def test_add_corner_cases(self, datetime_series):
empty = Series([], index=Index([]), dtype=np.float64)
result = datetime_series + empty
assert np.isnan(result).all()
result = empty + empty.copy()
assert len(result) == 0
        # FIXME: don't leave commented-out
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = datetime_series.astype(int)[:-5]
added = datetime_series + int_ts
expected = Series(
datetime_series.values[:-5] + int_ts.values,
index=datetime_series.index[:-5],
name="ts",
)
tm.assert_series_equal(added[:-5], expected)
def test_mul_empty_int_corner_case(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({"x": 0.0})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=["x"]))
def test_sub_datetimelike_align(self):
# GH#7500
# datetimelike ops need to align
dt = Series(date_range("2012-1-1", periods=3, freq="D"))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
tm.assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
tm.assert_series_equal(result, expected)
def test_alignment_doesnt_change_tz(self):
# GH#33671
dti = date_range("2016-01-01", periods=10, tz="CET")
dti_utc = dti.tz_convert("UTC")
ser = Series(10, index=dti)
ser_utc = Series(10, index=dti_utc)
# we don't care about the result, just that original indexes are unchanged
ser * ser_utc
assert ser.index is dti
assert ser_utc.index is dti_utc
def test_arithmetic_with_duplicate_index(self):
# GH#8363
# integer ops with a non-unique index
index = [2, 2, 3, 3, 4]
ser = Series(np.arange(1, 6, dtype="int64"), index=index)
other = Series(np.arange(5, dtype="int64"), index=index)
result = ser - other
expected = Series(1, index=[2, 2, 3, 3, 4])
tm.assert_series_equal(result, expected)
# GH#8363
# datetime ops with a non-unique index
ser = Series(date_range("20130101 09:00:00", periods=5), index=index)
other = Series(date_range("20130101", periods=5), index=index)
result = ser - other
expected = Series(Timedelta("9 hours"), index=[2, 2, 3, 3, 4])
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Comparisons
class TestSeriesFlexComparison:
@pytest.mark.parametrize("axis", [0, None, "index"])
def test_comparison_flex_basic(self, axis, all_compare_operators):
op = all_compare_operators.strip("__")
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
result = getattr(left, op)(right, axis=axis)
expected = getattr(operator, op)(left, right)
tm.assert_series_equal(result, expected)
def test_comparison_bad_axis(self, all_compare_operators):
op = all_compare_operators.strip("__")
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
msg = "No axis named 1 for object type"
with pytest.raises(ValueError, match=msg):
getattr(left, op)(right, axis=1)
@pytest.mark.parametrize(
"values, op",
[
([False, False, True, False], "eq"),
([True, True, False, True], "ne"),
([False, False, True, False], "le"),
([False, False, False, False], "lt"),
([False, True, True, False], "ge"),
([False, True, False, False], "gt"),
],
)
def test_comparison_flex_alignment(self, values, op):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right)
expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"values, op, fill_value",
[
([False, False, True, True], "eq", 2),
([True, True, False, False], "ne", 2),
([False, False, True, True], "le", 0),
([False, False, False, True], "lt", 0),
([True, True, True, False], "ge", 0),
([True, True, False, False], "gt", 0),
],
)
def test_comparison_flex_alignment_fill(self, values, op, fill_value):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right, fill_value=fill_value)
expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
class TestSeriesComparison:
def test_comparison_different_length(self):
a = Series(["a", "b", "c"])
b = Series(["b", "a"])
msg = "only compare identically-labeled Series"
with pytest.raises(ValueError, match=msg):
a < b
a = Series([1, 2])
b = Series([2, 3, 4])
with pytest.raises(ValueError, match=msg):
a == b
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes(self, opname):
# GH#15115
ser = Series([1, 3, 2], index=range(3))
const = 2
result = getattr(ser, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes_empty(self, opname):
# GH#15115 empty Series case
ser = Series([1, 3, 2], index=range(3))
empty = ser.iloc[:0]
const = 2
result = getattr(empty, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt],
)
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("baz", "baz", "baz")]
)
def test_ser_cmp_result_names(self, names, op):
# datetime64 dtype
dti = date_range("1949-06-07 03:00:00", freq="H", periods=5, name=names[0])
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# datetime64tz dtype
dti = dti.tz_localize("US/Central")
dti = pd.DatetimeIndex(dti, freq="infer") # freq not preserved by tz_localize
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# timedelta64 dtype
tdi = dti - dti.shift(1)
ser = Series(tdi).rename(names[1])
result = op(ser, tdi)
assert result.name == names[2]
# interval dtype
if op in [operator.eq, operator.ne]:
# interval dtype comparisons not yet implemented
ii = pd.interval_range(start=0, periods=5, name=names[0])
ser = Series(ii).rename(names[1])
result = op(ser, ii)
assert result.name == names[2]
# categorical
if op in [operator.eq, operator.ne]:
# categorical dtype comparisons raise for inequalities
cidx = tdi.astype("category")
ser = Series(cidx).rename(names[1])
result = op(ser, cidx)
assert result.name == names[2]
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid="ignore"):
expected = (left > right).astype("O")
expected[:3] = np.nan
tm.assert_almost_equal(result, expected)
s = Series(["a", "b", "c"])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
# -----------------------------------------------------------------
# Categorical Dtype Comparisons
def test_categorical_comparisons(self):
# GH#8938
# allow equality comparisons
a = Series(list("abc"), dtype="category")
b = Series(list("abc"), dtype="object")
c = Series(["a", "b", "cc"], dtype="object")
d = Series(list("acb"), dtype="object")
e = Categorical(list("abc"))
f = Categorical(list("acb"))
# vs scalar
assert not (a == "a").all()
assert ((a != "a") == ~(a == "a")).all()
assert not ("a" == a).all()
assert (a == "a")[0]
assert ("a" == a)[0]
assert not ("a" != a)[0]
# vs list-like
assert (a == a).all()
assert not (a != a).all()
assert (a == list(a)).all()
assert (a == b).all()
assert (b == a).all()
assert ((~(a == b)) == (a != b)).all()
assert ((~(b == a)) == (b != a)).all()
assert not (a == c).all()
assert not (c == a).all()
assert not (a == d).all()
assert not (d == a).all()
# vs a cat-like
assert (a == e).all()
assert (e == a).all()
assert not (a == f).all()
assert not (f == a).all()
assert (~(a == e) == (a != e)).all()
assert (~(e == a) == (e != a)).all()
assert (~(a == f) == (a != f)).all()
assert (~(f == a) == (f != a)).all()
# non-equality is not comparable
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
a < b
with pytest.raises(TypeError, match=msg):
b < a
with pytest.raises(TypeError, match=msg):
a > b
with pytest.raises(TypeError, match=msg):
b > a
def test_unequal_categorical_comparison_raises_type_error(self):
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
cat > "b"
cat = Series(Categorical(list("abc"), ordered=False))
with pytest.raises(TypeError, match=msg):
cat > "b"
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
msg = "Invalid comparison between dtype=category and str"
with pytest.raises(TypeError, match=msg):
cat < "d"
with pytest.raises(TypeError, match=msg):
cat > "d"
with pytest.raises(TypeError, match=msg):
"d" < cat
with pytest.raises(TypeError, match=msg):
"d" > cat
tm.assert_series_equal(cat == "d", Series([False, False, False]))
tm.assert_series_equal(cat != "d", Series([True, True, True]))
# -----------------------------------------------------------------
def test_comparison_tuples(self):
# GH#11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
tm.assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
tm.assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
tm.assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
tm.assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
tm.assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
tm.assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
tm.assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self, all_compare_operators):
op = all_compare_operators
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
f = getattr(operator, op)
# test that comparisons work
val = ser[5]
result = f(ser, val)
expected = f(ser.dropna(), val).reindex(ser.index)
if op == "__ne__":
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
tm.assert_series_equal(result, expected)
# FIXME: don't leave commented-out
# result = f(val, ser)
# expected = f(val, ser.dropna()).reindex(ser.index)
# tm.assert_series_equal(result, expected)
def test_ne(self):
ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
expected = [True, True, False, True, True]
assert tm.equalContents(ts.index != 5, expected)
assert tm.equalContents(~(ts.index == 5), expected)
@pytest.mark.parametrize(
"left, right",
[
(
Series([1, 2, 3], index=list("ABC"), name="x"),
Series([2, 2, 2], index=list("ABD"), name="x"),
),
(
Series([1, 2, 3], index=list("ABC"), name="x"),
Series([2, 2, 2, 2], index=list("ABCD"), name="x"),
),
],
)
def test_comp_ops_df_compat(self, left, right, frame_or_series):
# GH 1134
msg = f"Can only compare identically-labeled {frame_or_series.__name__} objects"
if frame_or_series is not Series:
left = left.to_frame()
right = right.to_frame()
with pytest.raises(ValueError, match=msg):
left == right
with pytest.raises(ValueError, match=msg):
right == left
with pytest.raises(ValueError, match=msg):
left != right
with pytest.raises(ValueError, match=msg):
right != left
with pytest.raises(ValueError, match=msg):
left < right
with pytest.raises(ValueError, match=msg):
right < left
def test_compare_series_interval_keyword(self):
# GH#25338
s = Series(["IntervalA", "IntervalB", "IntervalC"])
result = s == "IntervalA"
expected = Series([True, False, False])
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
class TestTimeSeriesArithmetic:
# TODO: De-duplicate with test below
def test_series_add_tz_mismatch_converts_to_utc_duplicate(self):
rng = date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
ser = Series(np.random.randn(len(rng)), index=rng)
ts_moscow = ser.tz_convert("Europe/Moscow")
result = ser + ts_moscow
assert result.index.tz is pytz.utc
result = ts_moscow + ser
assert result.index.tz is pytz.utc
def test_series_add_tz_mismatch_converts_to_utc(self):
rng = date_range("1/1/2011", periods=100, freq="H", tz="utc")
perm = np.random.permutation(100)[:90]
ser1 = Series(
np.random.randn(90), index=rng.take(perm).tz_convert("US/Eastern")
)
perm = np.random.permutation(100)[:90]
ser2 = Series(
np.random.randn(90), index=rng.take(perm).tz_convert("Europe/Berlin")
)
result = ser1 + ser2
uts1 = ser1.tz_convert("utc")
uts2 = ser2.tz_convert("utc")
expected = uts1 + uts2
assert result.index.tz == pytz.UTC
tm.assert_series_equal(result, expected)
def test_series_add_aware_naive_raises(self):
rng = date_range("1/1/2011", periods=10, freq="H")
ser = Series(np.random.randn(len(rng)), index=rng)
ser_utc = ser.tz_localize("utc")
msg = "Cannot join tz-naive with tz-aware DatetimeIndex"
with pytest.raises(Exception, match=msg):
ser + ser_utc
with pytest.raises(Exception, match=msg):
ser_utc + ser
def test_datetime_understood(self):
# Ensures it doesn't fail to create the right series
# reported in issue#16726
series = Series(date_range("2012-01-01", periods=3))
offset = pd.offsets.DateOffset(days=6)
result = series - offset
expected = Series(pd.to_datetime(["2011-12-26", "2011-12-27", "2011-12-28"]))
tm.assert_series_equal(result, expected)
def test_align_date_objects_with_datetimeindex(self):
rng = date_range("1/1/2000", periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
class TestNamePreservation:
@pytest.mark.parametrize("box", [list, tuple, np.array, Index, Series, pd.array])
@pytest.mark.parametrize("flex", [True, False])
def test_series_ops_name_retention(
self, request, flex, box, names, all_binary_operators
):
# GH#33930 consistent name retention
op = all_binary_operators
if op is ops.rfloordiv and box in [list, tuple] and not flex:
request.node.add_marker(
pytest.mark.xfail(
reason="op fails because of inconsistent ndarray-wrapping GH#28759"
)
)
left = Series(range(10), name=names[0])
right = Series(range(10), name=names[1])
name = op.__name__.strip("_")
is_logical = name in ["and", "rand", "xor", "rxor", "or", "ror"]
is_rlogical = is_logical and name.startswith("r")
right = box(right)
if flex:
if is_logical:
# Series doesn't have these as flex methods
return
result = getattr(left, name)(right)
else:
# GH#37374 logical ops behaving as set ops deprecated
warn = FutureWarning if is_rlogical and box is Index else None
msg = "operating as a set operation is deprecated"
with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
# stacklevel is correct for Index op, not reversed op
result = op(left, right)
if box is Index and is_rlogical:
# Index treats these as set operators, so does not defer
assert isinstance(result, Index)
return
assert isinstance(result, Series)
if box in [Index, Series]:
assert result.name == names[2]
else:
assert result.name == names[0]
def test_binop_maybe_preserve_name(self, datetime_series):
# names match, preserve
result = datetime_series * datetime_series
assert result.name == datetime_series.name
result = datetime_series.mul(datetime_series)
assert result.name == datetime_series.name
result = datetime_series * datetime_series[:-2]
assert result.name == datetime_series.name
# names don't match, don't preserve
cp = datetime_series.copy()
cp.name = "something else"
result = datetime_series + cp
assert result.name is None
result = datetime_series.add(cp)
assert result.name is None
ops = ["add", "sub", "mul", "div", "truediv", "floordiv", "mod", "pow"]
ops = ops + ["r" + op for op in ops]
for op in ops:
# names match, preserve
ser = datetime_series.copy()
result = getattr(ser, op)(ser)
assert result.name == datetime_series.name
# names don't match, don't preserve
cp = datetime_series.copy()
cp.name = "changed"
result = getattr(ser, op)(cp)
assert result.name is None
def test_scalarop_preserve_name(self, datetime_series):
result = datetime_series * 2
assert result.name == datetime_series.name
class TestInplaceOperations:
@pytest.mark.parametrize(
"dtype1, dtype2, dtype_expected, dtype_mul",
(
("Int64", "Int64", "Int64", "Int64"),
("float", "float", "float", "float"),
("Int64", "float", "Float64", "Float64"),
("Int64", "Float64", "Float64", "Float64"),
),
)
def test_series_inplace_ops(self, dtype1, dtype2, dtype_expected, dtype_mul):
# GH 37910
ser1 = Series([1], dtype=dtype1)
ser2 = Series([2], dtype=dtype2)
ser1 += ser2
expected = Series([3], dtype=dtype_expected)
tm.assert_series_equal(ser1, expected)
ser1 -= ser2
expected = Series([1], dtype=dtype_expected)
tm.assert_series_equal(ser1, expected)
ser1 *= ser2
expected = Series([2], dtype=dtype_mul)
tm.assert_series_equal(ser1, expected)
def test_none_comparison(series_with_simple_index):
series = series_with_simple_index
if isinstance(series.index, IntervalIndex):
# IntervalIndex breaks on "series[0] = np.nan" below
pytest.skip("IntervalIndex doesn't support assignment")
if len(series) < 1:
pytest.skip("Test doesn't make sense on empty data")
# bug brought up by #1079
# changed from TypeError in 0.17.0
series[0] = np.nan
# noinspection PyComparisonWithNone
result = series == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = series != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == series # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != series # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(series.dtype) or is_datetime64tz_dtype(series.dtype):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
msg = "Invalid comparison"
with pytest.raises(TypeError, match=msg):
None > series
with pytest.raises(TypeError, match=msg):
series > None
else:
result = None > series
assert not result.iat[0]
assert not result.iat[1]
result = series < None
assert not result.iat[0]
assert not result.iat[1]
def test_series_varied_multiindex_alignment():
# GH 20414
s1 = Series(
range(8),
index=pd.MultiIndex.from_product(
[list("ab"), list("xy"), [1, 2]], names=["ab", "xy", "num"]
),
)
s2 = Series(
[1000 * i for i in range(1, 5)],
index=pd.MultiIndex.from_product([list("xy"), [1, 2]], names=["xy", "num"]),
)
result = s1.loc[pd.IndexSlice["a", :, :]] + s2
expected = Series(
[1000, 2001, 3002, 4003],
index=pd.MultiIndex.from_tuples(
[("a", "x", 1), ("a", "x", 2), ("a", "y", 1), ("a", "y", 2)],
names=["ab", "xy", "num"],
),
)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
jrmyp/attelo | attelo/parser/tests.py | 1 | 9095 | """
attelo.parser tests
"""
from __future__ import print_function
from sklearn.linear_model import (LogisticRegression)
import itertools as itr
import numpy as np
import scipy
import unittest
from attelo.decoding.astar import (AstarArgs,
Heuristic,
RfcConstraint,
AstarDecoder)
from attelo.decoding.baseline import (LastBaseline,
LocalBaseline)
from attelo.decoding.mst import (MstDecoder,
MstRootStrategy)
from attelo.decoding.greedy import (LocallyGreedy)
from attelo.decoding.tests import (DecoderTest)
from attelo.decoding.window import (WindowPruner)
from attelo.edu import EDU, FAKE_ROOT, FAKE_ROOT_ID
from attelo.learning.local import (SklearnAttachClassifier,
SklearnLabelClassifier)
from attelo.learning.perceptron import (StructuredPerceptron)
from attelo.table import (DataPack)
from attelo.util import (Team)
from .full import (JointPipeline,
PostlabelPipeline)
from .pipeline import (Pipeline)
from .intra import (HeadToHeadParser,
IntraInterPair,
SentOnlyParser,
SoftParser,
for_intra,
partition_subgroupings)
# pylint: disable=too-few-public-methods
DEFAULT_ASTAR_ARGS = AstarArgs(heuristics=Heuristic.average,
rfc=RfcConstraint.none,
beam=None,
use_prob=True)
# select a decoder and a learner team
MST_DECODER = MstDecoder(root_strategy=MstRootStrategy.fake_root)
ASTAR_DECODER = AstarDecoder(DEFAULT_ASTAR_ARGS)
DECODERS = [
LastBaseline(),
LocalBaseline(0.5, use_prob=False),
MST_DECODER,
ASTAR_DECODER,
LocallyGreedy(),
Pipeline(steps=[('window pruner', WindowPruner(2)),
('decoder', ASTAR_DECODER)]),
]
LEARNERS = [
Team(attach=SklearnAttachClassifier(LogisticRegression()),
label=SklearnLabelClassifier(LogisticRegression())),
]
class ParserTest(DecoderTest):
"""
Tests for the attelo.parser infrastructure
"""
def _test_parser(self, parser):
"""
Train a parser and decode on the same data (not a particularly
meaningful test, but it ensures we exercise as much code as possible)
"""
target = np.array([1, 2, 3, 1, 4, 3])
parser.fit([self.dpack], [target])
parser.transform(self.dpack)
def test_decoder_by_itself(self):
for parser in DECODERS:
self._test_parser(parser)
def test_joint_parser(self):
for l, d in itr.product(LEARNERS, DECODERS):
parser = JointPipeline(learner_attach=l.attach,
learner_label=l.label,
decoder=d)
self._test_parser(parser)
def test_postlabel_parser(self):
learners = LEARNERS + [
Team(attach=StructuredPerceptron(MST_DECODER,
n_iter=3,
average=True,
use_prob=False),
label=SklearnLabelClassifier(LogisticRegression())),
]
for l, d in itr.product(learners, DECODERS):
parser = PostlabelPipeline(learner_attach=l.attach,
learner_label=l.label,
decoder=d)
self._test_parser(parser)
class IntraTest(unittest.TestCase):
"""Intrasentential parser"""
@staticmethod
def _dpack_1():
"example datapack for testing"
# pylint: disable=invalid-name
a1 = EDU('a1', '', 0, 0, 'a', 's1')
a2 = EDU('a2', '', 0, 0, 'a', 's1')
a3 = EDU('a3', '', 0, 0, 'a', 's1')
b1 = EDU('b1', '', 0, 0, 'a', 's2')
b2 = EDU('b2', '', 0, 0, 'a', 's2')
b3 = EDU('b3', '', 0, 0, 'a', 's2')
# pylint: enable=invalid-name
orig_classes = ['__UNK__', 'UNRELATED', 'ROOT', 'x']
dpack = DataPack.load(edus=[a1, a2, a3,
b1, b2, b3],
pairings=[(FAKE_ROOT, a1),
(FAKE_ROOT, a2),
(FAKE_ROOT, a3),
(a1, a2),
(a1, a3),
(a2, a3),
(a2, a1),
(a3, a1),
(a3, a2),
(FAKE_ROOT, b1),
(FAKE_ROOT, b2),
(FAKE_ROOT, b3),
(b1, b2),
(b1, b3),
(b2, b3),
(b2, b1),
(b3, b1),
(b3, b2),
(a1, b1)],
data=scipy.sparse.csr_matrix([[1], [1], [1],
[1], [1], [1],
[1], [1], [1],
[1], [1], [1],
[1], [1], [1],
[1], [1], [1],
[1]]),
target=np.array([2, 1, 1, 3, 1, 3, 1, 1, 1,
1, 1, 2, 3, 1, 3, 1, 1, 1,
3]),
ctarget=dict(), # WIP
labels=orig_classes,
vocab=None)
return dpack
def test_partition_subgroupings(self):
'test that sentences are split correctly'
big_dpack = self._dpack_1()
partitions = [big_dpack.selected(idxs)
for idxs in partition_subgroupings(big_dpack)]
all_valid = frozenset(x.subgrouping for x in big_dpack.edus)
all_subgroupings = set()
for dpack in partitions:
valid = dpack.edus[0].subgrouping
subgroupings = set()
for edu1, edu2 in dpack.pairings:
if edu1.id != FAKE_ROOT_ID:
subgroupings.add(edu1.subgrouping)
subgroupings.add(edu2.subgrouping)
all_subgroupings |= subgroupings
self.assertEqual(list(subgroupings), [valid])
self.assertEqual(all_valid, all_subgroupings)
self.assertEqual(len(all_subgroupings), len(partitions))
def test_for_intra(self):
'test that sentence roots are identified correctly'
dpack = self._dpack_1()
ipack, _ = for_intra(dpack, dpack.target)
sroots = np.where(ipack.target == ipack.label_number('ROOT'))[0]
sroot_pairs = ipack.selected(sroots).pairings
self.assertTrue(all(edu1 == FAKE_ROOT for edu1, edu2 in sroot_pairs),
'all root links are roots')
self.assertEqual(set(e2.subgrouping for _, e2 in sroot_pairs),
set(e.subgrouping for e in dpack.edus),
'every sentence represented')
def _test_parser(self, parser):
"""
Train a parser and decode on the same data (not a particularly
meaningful test, but it ensures we exercise as much code as possible)
"""
dpack = self._dpack_1()
parser.fit([dpack], [dpack.target])
parser.transform(dpack)
def test_intra_parsers(self):
'test all intra/inter parsers on a dpack'
learner_intra = Team(
attach=SklearnAttachClassifier(LogisticRegression()),
label=SklearnLabelClassifier(LogisticRegression()))
learner_inter = Team(
attach=SklearnAttachClassifier(LogisticRegression()),
label=SklearnLabelClassifier(LogisticRegression()))
# note: these are chosen a bit randomly
p_intra = JointPipeline(learner_attach=learner_intra.attach,
learner_label=learner_intra.label,
decoder=MST_DECODER)
p_inter = PostlabelPipeline(learner_attach=learner_inter.attach,
learner_label=learner_inter.label,
decoder=MST_DECODER)
parsers = [mk_p(IntraInterPair(intra=p_intra,
inter=p_inter))
for mk_p in [SentOnlyParser, SoftParser, HeadToHeadParser]]
for parser in parsers:
self._test_parser(parser)
| gpl-3.0 |
timoMa/vigra | vigranumpy/examples/graph_3cycles.py | 7 | 1476 | import vigra
import vigra.graphs as graphs
import pylab
import numpy
import matplotlib
# parameter:
filepath = '100075.jpg' # input image path
sigmaGradMag = 3.0 # sigma Gaussian gradient
superpixelDiameter = 100 # super-pixel size
slicWeight = 50.0 # SLIC color - spatial weight
# load image and convert to LAB
img = vigra.impex.readImage(filepath)
# get super-pixels with slic on LAB image
imgLab = vigra.colors.transform_RGB2Lab(img)
labels, nseg = vigra.analysis.slicSuperpixels(imgLab, slicWeight,
superpixelDiameter)
labels = vigra.analysis.labelImage(labels)
# compute gradient
imgLabBig = vigra.resize(imgLab, [imgLab.shape[0]*2-1, imgLab.shape[1]*2-1])
gradMag = vigra.filters.gaussianGradientMagnitude(imgLab, sigmaGradMag)
gradMagBig = vigra.filters.gaussianGradientMagnitude(imgLabBig, sigmaGradMag*2.0)
# get 2D grid graph and edgeMap for grid graph
# from gradMag of interpolated image
gridGraph = graphs.gridGraph(img.shape[0:2])
gridGraphEdgeIndicator = graphs.edgeFeaturesFromInterpolatedImage(gridGraph,
gradMagBig)
# get region adjacency graph from super-pixel labels
rag = graphs.regionAdjacencyGraph(gridGraph, labels)
cycles = graphs.find3CyclesEdges(rag)
for c in range(cycles.shape[0]):
cic = cycles[c,:]
f = numpy.zeros(rag.edgeNum)
f[cic] = 1
rag.showEdgeFeature(img, f)
vigra.show()
| mit |
armsd/aRMSD | armsd/aplot.py | 1 | 56945 | """
aRMSD plot functions
(c) 2017 by Arne Wagner
"""
# Authors: Arne Wagner
# License: MIT
from __future__ import absolute_import, division, print_function
from builtins import range
import sys
try:
import numpy as np
except ImportError:
pass
try:
from vtk import (vtkCellPicker, vtkSphereSource, vtkLineSource, vtkTubeFilter, vtkPolyDataMapper, vtkActor,
vtkRenderer, vtkRenderWindow, vtkRenderWindowInteractor, vtkRenderLargeImage, vtkPNGWriter,
vtkWindowToImageFilter, vtkCamera, vtkVectorText, vtkFollower, vtkArrowSource, vtkCubeSource,
vtkLegendBoxActor, vtkMath, vtkMatrix4x4, vtkTransformPolyDataFilter, vtkTransform, vtkLookupTable,
vtkScalarBarActor, vtkScalarBarWidget, vtkInteractorStyleTrackballCamera, vtkProperty,
vtkPropPicker, VTK_VERSION)
has_vtk, vtk_version = True, VTK_VERSION
except ImportError:
has_vtk = False
vtk_version = 'Module not available'
try:
import matplotlib as mpl
has_mpl, mpl_version = True, mpl.__version__
if sys.version_info <= (3,0):
mpl.use('QT4Agg') # Set MPL backend to QT4Agg
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
except ImportError:
has_mpl = False
mpl_version = 'Module not available'
try:
from uncertainties import unumpy as unp
from uncertainties import ufloat
has_uc = True
except ImportError:
try:
import unumpycore as unp
from ucore import ufloat, ufloat_fromstr
except ImportError:
pass
# Matplotlib/pyplot settings, Set Backend to QT4Agg
# C:\Python\Lib\site-packages\matplotlib\mpl-data\matplotlibrc
almost_black = '#262626'
mpl.rcParams['savefig.dpi'] = 600
mpl.rcParams['savefig.bbox'] = 'tight'
mpl.rcParams['axes.edgecolor'] = almost_black
mpl.rcParams['axes.labelcolor'] = almost_black
# Copy structural properties from core module
def geo_distance(xyz1, xyz2):
""" Global function for distance calculation - compatible with uncertainties
coordinates are assumed to be uarrays """
return np.sum((xyz1 - xyz2)**2)**0.5
def geo_angle(xyz1, xyz2, xyz3):
""" Global function for angle calculation - compatible with uncertainties
coordinates are assumed to be uarrays """
v1, v2 = xyz1 - xyz2, xyz3 - xyz2
dv1_dot_dv2 = np.sum(v1**2)**0.5 * np.sum(v2**2)**0.5
return (180.0/np.pi) * unp.arccos(np.dot(v1, v2) / dv1_dot_dv2)
def geo_torsion(xyz1, xyz2, xyz3, xyz4):
""" Global function for torsion calculation - compatible with uncertainties
coordinates are assumed to be uarrays """
b0 = -1.0 * (xyz2 - xyz1)
b1 = xyz3 - xyz2
b2 = xyz4 - xyz3
b0xb1, b1xb2 = np.cross(b0, b1), np.cross(b2, b1) # Planes defined by the vectors
b0xb1_x_b1xb2 = np.cross(b0xb1, b1xb2)
y = np.dot(b0xb1_x_b1xb2, b1) * (1.0 / np.sum(b1**2)**0.5)
x = np.dot(b0xb1, b1xb2)
return np.abs((180.0/np.pi) * unp.arctan2(y, x)) # Ignore sign of the dihedral angle
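# Illustrative usage sketch (editorial addition, not part of the original aRMSD code):
# the three geometry helpers above are meant to be fed unumpy uarrays so that the
# coordinate standard deviations propagate into the derived quantities. The
# coordinates and the 0.01 Angstrom uncertainties below are hypothetical values
# chosen only for demonstration.
def _geo_helpers_demo():
    """ Minimal sketch: distance, angle and torsion from uarray coordinates """
    nominal = np.array([[0.0, 0.0, 0.0],
                        [1.5, 0.0, 0.0],
                        [1.5, 1.5, 0.0],
                        [1.5, 1.5, 1.5]])
    xyz = unp.uarray(nominal, np.full(nominal.shape, 0.01))  # hypothetical stds
    dist = geo_distance(xyz[0], xyz[1])                     # ~1.5 A with propagated error
    angle = geo_angle(xyz[0], xyz[1], xyz[2])               # ~90 deg.
    torsion = geo_torsion(xyz[0], xyz[1], xyz[2], xyz[3])   # ~90 deg. (absolute value)
    return dist, angle, torsion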
###############################################################################
# VTK ROUTINES
###############################################################################
class aRMSD_substructure_picker(vtkInteractorStyleTrackballCamera):
""" Class for the fractional coordinates / aRMSD substructure selection """
def __init__(self, settings, atoms_to_pick, align, plot_type, picker_type):
""" Initializes the picker interactor """
self.plot_type = plot_type
self.AddObserver('LeftButtonPressEvent', self.leftButtonPressEvent)
# Arrays for picked atoms and actors
self.PickedAtoms, self.PickedActors = np.array([], dtype=np.int), np.array([], dtype=np.int)
self.LastPickedActor = None
self.LastPickedProperty = vtkProperty()
self.actors_to_pick = np.asarray(atoms_to_pick)
self.picker_color = settings.picker_col_rgb
self.picker_type = picker_type
self.NewPickedActor = None
self.sym_idf = align.sym_idf
self.bnd_idx = align.bnd_idx
self.colors = align.col_glob_rgb
def full_connects(self, idx):
""" Determines the all positions ultimately attached to the given atom """
def _is_connected_to(idx):
""" Determines the connections of the given index """
ravel_bnds = np.ravel(self.bnd_idx[np.where(self.bnd_idx == idx)[0]])
pos = np.where(ravel_bnds != idx)[0]
return ravel_bnds[pos]
# Set up initial connection array and evaluate first index
connection_array = np.asarray(idx, dtype=np.int)
connection_array = np.unique(np.hstack((connection_array, _is_connected_to(idx))))
checked_pos = [idx] # This list contains all positions that have been checked
if len(connection_array) == 1: # No atoms are connected to the picked one
pass
else:
while True: # Stay in this loop until no additional indices are added
old_len = len(connection_array)
for pos in connection_array:
if pos not in checked_pos: # Evaluate only once
connection_array = np.unique(np.hstack((connection_array, _is_connected_to(pos))))
checked_pos.append(pos)
new_len = len(connection_array)
if new_len == old_len: # Exit loop if no changes occurred after all positions were checked
break
return connection_array
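    # Illustrative example (editorial comment with hypothetical indices): with
    # self.bnd_idx = np.array([[0, 1], [1, 2], [3, 4]]), full_connects(0) keeps merging
    # the neighbours of every newly reached index and returns array([0, 1, 2]); atoms
    # 3 and 4 are left untouched because no chain of bonds links them to the picked atom.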
def click_message(self, sym_idf, picker_type):
""" Message displayed to user when an atom is clicked """
if self.plot_type == 'substructure':
print("> Atom "+str(sym_idf)+" has been added to 'substructure 1' ...")
elif self.plot_type == 'fractional':
if picker_type == 'cluster':
print("> All atoms connected to "+str(sym_idf)+" will be removed ...")
else:
print("> Atom "+str(sym_idf)+" will be removed ...")
def second_click_message(self, sym_idf, picker_type):
""" Message displayed to user when a selected atom is clicked """
if self.plot_type == 'substructure':
print("> Atom "+str(sym_idf)+" has been removed from 'substructure 1' ...")
elif self.plot_type == 'fractional':
if picker_type == 'cluster':
print("> Removal of all atoms connected to "+str(sym_idf)+" was cancelled ...")
else:
print("> Removal of atom "+str(sym_idf)+" was cancelled ...")
def leftButtonPressEvent(self, obj, event):
""" Event that will happen on left mouse click """
clickPos = self.GetInteractor().GetEventPosition() # Get the clicked position
picker = vtkPropPicker()
picker.Pick(clickPos[0], clickPos[1], 0, self.GetDefaultRenderer())
self.NewPickedActor = picker.GetActor() # Get the actual actor on the clicked position
# If an actor/atom has been selected (only selective pick events via actors_to_pick)
if self.NewPickedActor is not None and self.NewPickedActor in self.actors_to_pick:
atom_idx = int(np.where(self.NewPickedActor == self.actors_to_pick)[0]) # Index of the atom
if atom_idx not in self.PickedAtoms: # Select only if it wasn't selected so far
# Highlight the atom by changing the color
self.click_message(self.sym_idf[atom_idx], self.picker_type)
self.NewPickedActor.GetProperty().SetColor(self.picker_color)
if self.picker_type == 'cluster':
all_positions = self.full_connects(atom_idx)
self.PickedActors = np.unique(np.append(self.PickedActors, self.actors_to_pick[all_positions]))
self.PickedAtoms = np.unique(np.append(self.PickedAtoms, all_positions))
# Change colors for all atoms
[actor.GetProperty().SetColor(self.picker_color) for actor in self.PickedActors]
else:
self.PickedActors = np.unique(np.append(self.PickedActors, self.actors_to_pick[atom_idx]))
self.PickedAtoms = np.unique(np.append(self.PickedAtoms, atom_idx))
else: # Remove duplicates
self.second_click_message(self.sym_idf[atom_idx], self.picker_type)
if self.picker_type == 'cluster': # Change all connected atoms
all_positions = self.full_connects(atom_idx)
pos_in_picked_atoms = np.ravel(np.asarray([np.where(self.PickedAtoms == pos)[0]
for pos in all_positions]))
self.PickedActors = np.unique(np.asarray([np.delete(self.PickedActors, np.where(self.PickedActors == self.actors_to_pick[pos])[0]) for pos in all_positions])) # Remove actor from array
self.PickedAtoms = np.unique(np.delete(self.PickedAtoms, pos_in_picked_atoms, axis=0)) # Remove atomic index from index array
[actor.GetProperty().SetColor(self.colors) for actor in self.PickedActors] # Change colors for all atoms
else:
self.PickedActors = np.unique(np.delete(self.PickedActors, np.where(self.PickedActors == self.actors_to_pick[atom_idx])[0])) # Remove actor from array
self.PickedAtoms = np.unique(np.delete(self.PickedAtoms, np.where(self.PickedAtoms == atom_idx)[0])) # Remove atomic index from index array
self.NewPickedActor.GetProperty().SetColor(self.colors) # Reset the color to the initial value
self.OnLeftButtonDown()
return
# ---------------------------------------------------------------------------------
class aRMSD_plot_picker(vtkInteractorStyleTrackballCamera):
""" Class for picking events in the aRMSD plot """
def __init__(self, settings, atoms_to_pick, align):
""" Initializes the picker interactor """
self.AddObserver('LeftButtonPressEvent', self.leftButtonPressEvent)
self.PickedAtoms, self.PickedActors = [], [] # Lists for picked atoms and actors
self.LastPickedActor = None
self.LastPickedProperty = vtkProperty()
self.actors_to_pick = np.asarray(atoms_to_pick)
self.picker_color = settings.picker_col_rgb
self.std_type = settings.std_type
self.calc_prec = settings.calc_prec
self.use_std = settings.use_std
self.sym_idf = align.sym_idf
self.coords = align.cor
self.coords_mol1 = align.cor_mol1_kbs
self.coords_mol2 = align.cor_mol2_kbs
self.coords_std_mol1 = align.cor_mol1_kbs_std
self.coords_std_mol2 = align.cor_mol2_kbs_std
self.colors = align.col_at_rgb
self.name_mol1 = align.name1
self.name_mol2 = align.name2
self.RMSD_per_atom = align.msd_sum**0.5
self.rmsd_perc = (align.msd_sum / np.sum(align.msd_sum)) * 100 # Contribution of individual atom types
def calc_picker_property(self, list_of_picks):
""" Calculates distances, angles or dihedral angles with or without uncertainties """
def _proper_std(stds, list_of_picks):
if self.std_type == 'simple': # Check only if stds exist
return True
else: # H atoms and some heavy atoms may have no stds
return 0.0 not in np.sum(stds[np.asarray(list_of_picks)], axis=1)
def _per_mol(coords, stds):
""" Calculate for one molecule """
if self.use_std: # Combine coordinates and uncertainties to array
xyz = unp.uarray(coords[np.asarray(list_of_picks)], stds[np.asarray(list_of_picks)])
else:
xyz = coords[np.asarray(list_of_picks)]
if len(list_of_picks) == 2: # Distance
value = geo_distance(xyz[0], xyz[1])
elif len(list_of_picks) == 3: # Angle
value = geo_angle(xyz[0], xyz[1], xyz[2])
elif len(list_of_picks) == 4: # Torsion angle
value = geo_torsion(xyz[0], xyz[1], xyz[2], xyz[3])
return ufloat(value.nominal_values, 0.0) if not _proper_std(stds, list_of_picks) else value
p1, p2 = _per_mol(self.coords_mol1, self.coords_std_mol1), _per_mol(self.coords_mol2, self.coords_std_mol2)
delta = p2 - p1
return p1, p2, delta
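    # Note (editorial comment, not original code): calc_picker_property evaluates the
    # same distance, angle or torsion in both structures and returns the two values
    # together with their difference; _proper_std is used to fall back to a
    # zero-uncertainty result when one of the picked atoms has no standard deviations.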
def calc_property(self, list_of_picks):
""" Calculates different structural properties """
def apply_format(value, n_digits):
str_len = 12
ft_str_norm = '{:3.2f}'
if n_digits != 0:
ft_str_norm = '{:'+str(n_digits)+'.'+str(n_digits)+'f}'
ft_str_unce = '{:.1uS}' # One digit for values with uncertainties
if self.use_std: # If standard deviations exist
if value.std_dev == 0.0 or n_digits == 0: # Different format for values without standard deviations
if n_digits == 0:
str_len = 5
add = str_len - len(ft_str_norm.format(value.nominal_value))
if n_digits == 0 and value.nominal_value < 10.0:
return '0'+ft_str_norm.format(value.nominal_value)+' '*(add-1)
else:
return ft_str_norm.format(value.nominal_value)+' '*add
else:
add = str_len - len(ft_str_unce.format(value))
return ft_str_unce.format(value)+' '*add
else: # No ufloat values
return ft_str_norm.format(value)
def print_values(values, n_digits, unit=' deg.'):
print('\n '+str(self.name_mol1)+': '+apply_format(values[0], n_digits)+unit+
'\n '+str(self.name_mol2)+': '+apply_format(values[1], n_digits)+unit+
'\t\tDiff. = '+apply_format(values[2], n_digits)+unit)
if len(list_of_picks) == 1: # Show RMSD contribution of the atom
print('\nAtom [' +str(self.sym_idf[list_of_picks[0]])+']: RMSD = '+
apply_format(self.RMSD_per_atom[list_of_picks[0]], 3)+
' Angstrom ('+apply_format(self.rmsd_perc[list_of_picks[0]], 0)+' % of the total RMSD)')
elif len(list_of_picks) == 2: # Calculate distance
d1, d2, delta = self.calc_picker_property(list_of_picks)
print('\nDistance between: ['+str(self.sym_idf[list_of_picks[0]])+' -- '+
str(self.sym_idf[list_of_picks[1]])+']')
print_values([d1, d2, delta], n_digits=5, unit=' A')
elif len(list_of_picks) == 3: # Calculate angle
a1, a2, delta = self.calc_picker_property(list_of_picks)
print('\nAngle between: ['+str(self.sym_idf[list_of_picks[0]])+' -- '+
str(self.sym_idf[list_of_picks[1]])+' -- '+str(self.sym_idf[list_of_picks[2]])+']')
print_values([a1, a2, delta], n_digits=5, unit=' deg.')
elif len(list_of_picks) == 4: # Calculate dihedral angle
t1, t2, delta = self.calc_picker_property(list_of_picks)
print('\nDihedral between: ['+str(self.sym_idf[list_of_picks[0]])+' -- '+
str(self.sym_idf[list_of_picks[1]])+' -- '+str(self.sym_idf[list_of_picks[2]])+' -- '+
str(self.sym_idf[list_of_picks[3]])+']')
print_values([t1, t2, delta], n_digits=5, unit=' deg.')
def leftButtonPressEvent(self, obj, event):
""" Event that will happen on left mouse click """
clickPos = self.GetInteractor().GetEventPosition() # Get the clicked position
picker = vtkPropPicker()
picker.Pick(clickPos[0], clickPos[1], 0, self.GetDefaultRenderer())
self.NewPickedActor = picker.GetActor() # Get the actual actor on the clicked position
# If an actor/atom has been selected (only selective pick events via actors_to_pick)
if self.NewPickedActor is not None and self.NewPickedActor in self.actors_to_pick:
atom_idx = int(np.where(self.NewPickedActor == self.actors_to_pick)[0]) # Index of the atom
if len(self.PickedAtoms) <= 3: # Maximum selection will be 4 atoms
if atom_idx not in self.PickedAtoms: # Select only if it wasn't selected so far
self.PickedActors.append(self.actors_to_pick[atom_idx])
self.PickedAtoms.append(atom_idx)
self.calc_property(self.PickedAtoms)
# Highlight the atom by changing the color
self.NewPickedActor.GetProperty().SetColor(self.picker_color)
else: # Remove duplicates
self.PickedActors.remove(self.actors_to_pick[atom_idx]) # Remove actor from list
self.PickedAtoms.remove(atom_idx) # Remove atomic index from indices list
self.calc_property(self.PickedAtoms)
# Reset the color to the initial value
self.NewPickedActor.GetProperty().SetColor(self.colors[atom_idx])
else: # Reset all colors
colors = [self.colors[index] for index in self.PickedAtoms]
[self.PickedActors[index].GetProperty().SetColor(colors[index]) for
index in range(len(self.PickedActors))]
self.PickedActors, self.PickedAtoms = [], [] # Empty the lists
self.OnLeftButtonDown()
return
class Molecular_Viewer_vtk(object):
""" A molecular viewer object based on vtk used for 3d plots """
def __init__(self, settings):
""" Initializes object and creates the renderer and camera """
self.ren = vtkRenderer()
self.ren_win = vtkRenderWindow()
self.ren_win.AddRenderer(self.ren)
self.ren.SetBackground(settings.backgr_col_rgb)
self.ren.SetUseDepthPeeling(settings.use_depth_peel)
self.title = 'aRMSD Structure Visualizer'
self.magnif = None
self.save_counts = 0
self.picker = None
# Create the active camera
self.camera = vtkCamera()
self.camera.SetPosition(np.array([0.0, 0.0, 50]))
self.ren.SetActiveCamera(self.camera)
self.bnd_eps = 1.0E-03
self.at_actors_list = [] # List of atomic actors (for pick events)
# Create a renderwindowinteractor
self.iren = vtkRenderWindowInteractor()
self.iren.SetRenderWindow(self.ren_win)
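    # Illustrative call sequence (editorial comment; 'settings', 'molecule1' and
    # 'molecule2' are hypothetical objects supplied by the rest of aRMSD):
    # viewer = Molecular_Viewer_vtk(settings)
    # viewer.make_initial_plot(molecule1, molecule2, settings)  # or make_kabsch_plot, ...
    # viewer.show(molecule1, molecule2, settings)               # opens the interactive window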
def show(self, molecule1, molecule2, settings):
""" Shows the results in a new window """
self.magnif = settings.magnif_fact # Copy magnification information from settings
# Determine file names for screenshots
if self.plot_type == 'initial':
self.png_file_name = 'VTK_initial_plot'
elif self.plot_type == 'inertia':
self.png_file_name = 'VTK_inertia_plot'
elif self.plot_type == 'aRMSD':
self.png_file_name = 'VTK_aRMSD_plot'
elif self.plot_type == 'superpos':
self.png_file_name = 'VTK_superposition_plot'
elif self.plot_type == 'substructure':
self.png_file_name = 'VTK_substructure_plot'
elif self.plot_type == 'fractional':
self.png_file_name = 'VTK_fractional_plot'
if self.has_cam_vtk: # Set camera properties (if they exist...)
self.camera.SetPosition(self.cam_vtk_pos)
self.camera.SetFocalPoint(self.cam_vtk_focal_pt)
self.camera.SetViewUp(self.cam_vtk_view_up)
self.iren.Initialize()
self.ren_win.SetSize(settings.window_size)
self.ren_win.SetWindowName(self.title)
self.iren.AddObserver('KeyPressEvent', self.keypress) # Key events for screenshots, etc.
self.ren_win.Render()
self.iren.Start()
# Determine the camera properties of the final orientation and store them
molecule1.cam_vtk_pos, molecule2.cam_vtk_pos = self.camera.GetPosition(), self.camera.GetPosition()
molecule1.cam_vtk_wxyz, molecule2.cam_vtk_wxyz = self.camera.GetOrientationWXYZ(), self.camera.GetOrientationWXYZ()
molecule1.cam_vtk_focal_pt, molecule2.cam_vtk_focal_pt = self.camera.GetFocalPoint(), self.camera.GetFocalPoint()
molecule1.cam_vtk_view_up, molecule2.cam_vtk_view_up = self.camera.GetViewUp(), self.camera.GetViewUp()
molecule1.has_cam_vtk, molecule2.has_cam_vtk = True, True
del self.ren_win, self.iren
if self.picker is not None and self.plot_type in ['substructure', 'fractional']:
return np.ravel(np.asarray(self.picker.PickedAtoms, dtype=np.int))
#self.close_window()
def close_window(self):
""" Not working, but intended to close the window """
self.ren_win.Finalize()
self.iren.TerminateApp()
def keypress(self, obj, event):
""" Function that handles key pressing events """
key = obj.GetKeySym()
if key == 's': # Screenshots
render_large = vtkRenderLargeImage()
render_large.SetInput(self.ren)
render_large.SetMagnification(self.magnif)
writer = vtkPNGWriter()
writer.SetInputConnection(render_large.GetOutputPort())
if self.save_counts == 0: # Make sure that screenshots are not overwritten by default
export_file_name = self.png_file_name+'.png'
else:
export_file_name = self.png_file_name+'_'+str(self.save_counts)+'.png'
writer.SetFileName(export_file_name)
self.ren_win.Render()
writer.Write()
print('\n> The image was saved as '+export_file_name+' !')
self.save_counts += 1 # Remember save event
del render_large
elif key == 'b': # Add or remove a bond
pass
elif key == 'h': # Display help
print("\n> Press the 's' button to save the scene as .png file")
def add_principal_axes(self, com, pa, length, col, settings):
""" Adds the principal axes of rotation to the view """
startPoint, endPoint = com*settings.scale_glob, (pa*2 + com)*settings.scale_glob
normalizedX, normalizedY, normalizedZ = np.zeros(3, dtype=np.float), np.zeros(3, dtype=np.float), \
np.zeros(3, dtype=np.float)
arrow = vtkArrowSource()
arrow.SetShaftResolution(settings.res_atom)
arrow.SetTipResolution(settings.res_atom)
arrow.SetShaftRadius(0.005*10)
arrow.SetTipLength(0.4)
arrow.SetTipRadius(0.01*10)
# The X axis is a vector from start to end
math = vtkMath()
math.Subtract(endPoint, startPoint, normalizedX)
length = math.Norm(normalizedX)
math.Normalize(normalizedX)
# The Z axis is an arbitrary vector cross X
arbitrary = np.asarray([0.2, -0.3, 1.7])
math.Cross(normalizedX, arbitrary, normalizedZ)
math.Normalize(normalizedZ)
# The Y axis is Z cross X
math.Cross(normalizedZ, normalizedX, normalizedY)
matrix = vtkMatrix4x4()
matrix.Identity() # Create the direction cosine matrix
for i in range(3):
matrix.SetElement(i, 0, normalizedX[i])
matrix.SetElement(i, 1, normalizedY[i])
matrix.SetElement(i, 2, normalizedZ[i])
# Apply the transforms
transform = vtkTransform()
transform.Translate(startPoint)
transform.Concatenate(matrix)
transform.Scale(length, length, length)
# Transform the polydata
transformPD = vtkTransformPolyDataFilter()
transformPD.SetTransform(transform)
transformPD.SetInputConnection(arrow.GetOutputPort())
# Create a mapper and connect it to the source data, set up actor
mapper = vtkPolyDataMapper()
mapper.SetInputConnection(transformPD.GetOutputPort())
arrow_actor = vtkActor()
arrow_actor.GetProperty().SetColor(col)
arrow_actor.GetProperty().SetLighting(settings.use_light)
arrow_actor.GetProperty().SetOpacity(settings.alpha_arrow)
arrow_actor.GetProperty().ShadingOn()
arrow_actor.SetMapper(mapper)
self.ren.AddActor(arrow_actor)
def add_arrow(self, direction, length, settings):
""" Adds a single arrow defined by length and reference axis """
# Generate start and end points based on length and reference axis
if direction == 'x':
startPoint, endPoint = np.asarray([-length, 0.0, 0.0]), np.asarray([length, 0.0, 0.0])
elif direction == 'y':
startPoint, endPoint = np.asarray([0.0, -length, 0.0]), np.asarray([0.0, length, 0.0])
elif direction == 'z':
startPoint, endPoint = np.asarray([0.0, 0.0, -length]), np.asarray([0.0, 0.0, length])
normalizedX, normalizedY, normalizedZ = np.zeros(3, dtype=np.float), np.zeros(3, dtype=np.float), \
np.zeros(3, dtype=np.float)
arrow = vtkArrowSource()
arrow.SetShaftResolution(settings.res_atom)
arrow.SetTipResolution(settings.res_atom)
arrow.SetShaftRadius(0.005)
arrow.SetTipLength(0.12)
arrow.SetTipRadius(0.02)
# The X axis is a vector from start to end
math = vtkMath()
math.Subtract(endPoint, startPoint, normalizedX)
length = math.Norm(normalizedX)
math.Normalize(normalizedX)
# The Z axis is an arbitrary vector cross X
arbitrary = np.asarray([0.2, -0.3, 1.7])
math.Cross(normalizedX, arbitrary, normalizedZ)
math.Normalize(normalizedZ)
# The Y axis is Z cross X
math.Cross(normalizedZ, normalizedX, normalizedY)
matrix = vtkMatrix4x4()
matrix.Identity() # Create the direction cosine matrix
for i in range(3):
matrix.SetElement(i, 0, normalizedX[i])
matrix.SetElement(i, 1, normalizedY[i])
matrix.SetElement(i, 2, normalizedZ[i])
# Apply the transforms
transform = vtkTransform()
transform.Translate(startPoint)
transform.Concatenate(matrix)
transform.Scale(length, length, length)
# Transform the polydata
transformPD = vtkTransformPolyDataFilter()
transformPD.SetTransform(transform)
transformPD.SetInputConnection(arrow.GetOutputPort())
# Create a mapper and connect it to the source data, set up actor
mapper = vtkPolyDataMapper()
mapper.SetInputConnection(transformPD.GetOutputPort())
arrow_actor = vtkActor()
arrow_actor.GetProperty().SetColor(settings.arrow_col_rgb)
arrow_actor.GetProperty().SetLighting(settings.use_light)
arrow_actor.GetProperty().SetOpacity(settings.alpha_arrow)
arrow_actor.GetProperty().ShadingOn()
arrow_actor.SetMapper(mapper)
self.ren.AddActor(arrow_actor)
def add_atom(self, pos, radius, color, settings):
""" Adds a single atom as vtkSphere with defined radius and color at the given position """
# Create new SphereSource and define its properties
atom = vtkSphereSource()
atom.SetCenter(pos)
atom.SetRadius(radius*settings.scale_at)
atom.SetPhiResolution(settings.res_atom)
atom.SetThetaResolution(settings.res_atom)
# Create a mapper and connect it to the source data, set up actor
mapper = vtkPolyDataMapper()
mapper.SetInputConnection(atom.GetOutputPort())
at_actor = vtkActor()
at_actor.GetProperty().SetColor(color)
at_actor.GetProperty().SetOpacity(settings.alpha_at)
at_actor.GetProperty().SetLighting(settings.use_light)
at_actor.GetProperty().ShadingOn()
at_actor.SetMapper(mapper)
self.ren.AddActor(at_actor)
self.at_actors_list.append(at_actor)
def add_com(self, molecule, radius, color, settings):
""" Adds center of mass """
self.add_atom(molecule.com*settings.scale_glob, radius, color, settings)
def add_all_atoms(self, molecule, settings):
""" Wrapper for the addition of all atoms from the molecule """
if settings.name == 'Wireframe': # Wireframe plot style
radii = np.repeat(0.76, molecule.n_atoms)
color = molecule.col_at_rgb
elif self.plot_type == 'substructure': # Substructure selection
radii = np.repeat(1.5, molecule.n_atoms)
color = np.transpose(np.repeat(molecule.col_glob_rgb,
molecule.n_atoms).reshape((3, molecule.n_atoms)))
else:
radii = molecule.rad_plt_vtk
color = molecule.col_at_rgb
[self.add_atom(molecule.cor[atom]*settings.scale_glob, radii[atom],
color[atom], settings) for atom in range(molecule.n_atoms)]
def add_all_atoms_superpos(self, align, settings):
""" Wrapper for the addition of all atoms for the superposition plot """
if settings.name == 'Wireframe':
radii = np.repeat(0.76, align.n_atoms)
else:
radii = align.rad_plt_vtk
[self.add_atom(align.cor_mol1_kbs[atom]*settings.scale_glob, radii[atom],
align.col_at_mol1_rgb[atom], settings) for atom in range(align.n_atoms)]
[self.add_atom(align.cor_mol2_kbs[atom]*settings.scale_glob, radii[atom],
align.col_at_mol2_rgb[atom], settings) for atom in range(align.n_atoms)]
def add_bond(self, first_loc, second_loc, color, settings):
""" Adds a single bond as vtkLine between two locations """
if np.linalg.norm(first_loc - second_loc) > self.bnd_eps:
# Create LineSource and set start and end point
bnd_source = vtkLineSource()
bnd_source.SetPoint1(first_loc)
bnd_source.SetPoint2(second_loc)
# Create a TubeFilter around the line
TubeFilter = vtkTubeFilter()
TubeFilter.SetInputConnection(bnd_source.GetOutputPort())
TubeFilter.SetRadius(settings.rad_bnd)
TubeFilter.SetNumberOfSides(settings.res_bond)
TubeFilter.CappingOn()
# Map data, create actor and set the color
mapper = vtkPolyDataMapper()
mapper.SetInputConnection(TubeFilter.GetOutputPort())
bnd_actor = vtkActor()
bnd_actor.GetProperty().SetColor(color)
bnd_actor.GetProperty().SetOpacity(settings.alpha_at)
bnd_actor.GetProperty().SetLighting(settings.use_light)
bnd_actor.GetProperty().ShadingOn()
bnd_actor.SetMapper(mapper)
self.ren.AddActor(bnd_actor)
def add_kabsch_bond(self, first_loc, second_loc, color1, color2, color3, settings):
""" Makes a single bond as a combination of three segments """
if np.allclose(color1, color2):
self.add_bond(first_loc, second_loc, color1, settings)
else:
diff = (second_loc - first_loc) / 3.0
# Add all thirds to actor list
self.add_bond(first_loc, first_loc+diff, color1, settings)
self.add_bond(first_loc+diff, first_loc+2*diff, color2, settings)
self.add_bond(first_loc+2*diff, second_loc, color3, settings)
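    # Note (editorial comment, not original code): a 'Kabsch bond' is rendered as three
    # tube segments of equal length; if the first two colors are (nearly) identical the
    # bond collapses to a single tube, otherwise the deviating middle color marks bonds
    # that differ between the two structures.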
def add_all_bonds_regular(self, molecule, settings):
""" Wrapper for the addition of all bonds from the molecule """
if self.plot_type == 'substructure':
color = np.transpose(np.repeat(molecule.col_glob_rgb,
molecule.n_bonds).reshape((3, molecule.n_bonds)))
else:
color = molecule.col_bnd_rgb
[self.add_bond(molecule.cor[molecule.bnd_idx[bond][0]]*settings.scale_glob,
molecule.cor[molecule.bnd_idx[bond][1]]*settings.scale_glob,
color[bond], settings) for bond in range(molecule.n_bonds)]
def add_all_bonds_disordered(self, molecule1, molecule2, settings):
""" Wrapper for the addition of all disordered positions between the molecules """
if settings.n_dev > 0 and molecule1.disord_pos is not None:
color_rgb = np.asarray(settings.col_disord_rgb) # RGB color for disordered positions
disord_col = np.transpose(np.repeat(color_rgb, settings.n_dev).reshape((3, settings.n_dev)))
[self.add_bond(molecule1.cor[molecule1.disord_pos[pos]]*settings.scale_glob,
molecule2.cor[molecule1.disord_pos[pos]]*settings.scale_glob,
disord_col[pos], settings) for pos in range(settings.n_dev)]
def add_all_bonds_kabsch(self, align, settings):
""" Wrapper for the addition of all bonds (Kabsch) from the molecule """
if align.chd_bnd_col_rgb is None: # Check if changed bonds exist at all - if they don't: use normal bonds
[self.add_bond(align.cor[align.bnd_idx[bond][0]]*settings.scale_glob,
align.cor[align.bnd_idx[bond][1]]*settings.scale_glob,
align.col_bnd_glob_rgb, settings) for bond in range(align.n_bonds)]
[self.add_kabsch_bond(align.cor[align.bnd_idx[bond][0]]*settings.scale_glob,
align.cor[align.bnd_idx[bond][1]]*settings.scale_glob,
align.col_bnd_rgb[bond], align.chd_bnd_col_rgb[bond], align.col_bnd_rgb[bond], settings)
for bond in range(align.n_bonds)]
def add_all_bonds_superpos(self, align, settings):
""" Wrapper for the addition of all bonds for the superposition plot """
[self.add_bond(align.cor_mol1_kbs[align.bnd_idx[bond][0]]*settings.scale_glob,
align.cor_mol1_kbs[align.bnd_idx[bond][1]]*settings.scale_glob,
align.col_bnd_mol1_rgb[bond], settings) for bond in range(align.n_bonds)]
[self.add_bond(align.cor_mol2_kbs[align.bnd_idx[bond][0]]*settings.scale_glob,
align.cor_mol2_kbs[align.bnd_idx[bond][1]]*settings.scale_glob,
align.col_bnd_mol2_rgb[bond], settings) for bond in range(align.n_bonds)]
def add_label(self, coords, color, label):
""" Adds a label at the given coordinate """
source = vtkVectorText()
source.SetText(label)
mapper = vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
follower = vtkFollower()
follower.SetMapper(mapper)
follower.GetProperty().SetColor(color)
follower.SetPosition(coords)
follower.SetScale(0.4)
self.ren.AddActor(follower)
follower.SetCamera(self.ren.GetActiveCamera())
def add_all_labels(self, molecule, settings):
""" Wrapper for the addition of all labels for the molecule """
if settings.name == 'Wireframe':
radii = np.transpose(np.reshape(np.repeat((0.0, 0.0, 0.76), molecule.n_atoms), (3, molecule.n_atoms)))
elif self.plot_type == 'substructure':
radii = np.repeat(1.5, molecule.n_atoms)
else:
radii = np.transpose(np.vstack((np.zeros(molecule.n_atoms), np.zeros(molecule.n_atoms),
molecule.rad_plt_vtk)))
if settings.draw_labels:
label_color = [0.0, 0.0, 0.0]
if settings.label_type == 'full':
labels = molecule.sym_idf
elif settings.label_type == 'symbol_only':
labels = molecule.sym
[self.add_label(molecule.cor[atom]*settings.scale_glob+radii[atom]*settings.scale_at, label_color,
labels[atom]) for atom in range(molecule.n_atoms)]
def add_legend(self, molecule1, molecule2, settings):
""" Adds a legend to the VTK renderer """
cube_source = vtkCubeSource()
cube_source.SetBounds(-0.001,0.001,-0.001,0.001,-0.001,0.001)
mapper = vtkPolyDataMapper()
mapper.SetInputConnection(cube_source.GetOutputPort()) # connect source and mapper
cube_actor = vtkActor()
cube_actor.SetMapper(mapper);
cube_actor.GetProperty().SetColor(settings.backgr_col_rgb) # Set cube color to background
legendBox = vtkLegendBoxActor() # Adds the actual legend box
legendBox.SetBackgroundColor(settings.backgr_col_rgb) # NOT WORKING - why
legendBox.SetBorder(1) # No border
legendBox.SetBox(2)
legendBox.SetNumberOfEntries(2)
if self.plot_type == 'initial':
legendBox.SetEntry(0, cube_source.GetOutput(), molecule1.name, settings.col_model_rgb)
legendBox.SetEntry(1, cube_source.GetOutput(), molecule2.name, settings.col_refer_rgb)
elif self.plot_type == 'superpos':
legendBox.SetEntry(0, cube_source.GetOutput(), molecule2.name1, settings.col_model_fin_rgb)
legendBox.SetEntry(1, cube_source.GetOutput(), molecule2.name2, settings.col_refer_fin_rgb)
pos1, pos2 = legendBox.GetPositionCoordinate(), legendBox.GetPosition2Coordinate()
pos1.SetCoordinateSystemToView(), pos2.SetCoordinateSystemToView()
pos1.SetValue(.4, -1.0), pos2.SetValue(1.0, -0.75)
self.ren.AddActor(cube_actor)
self.ren.AddActor(legendBox)
def add_color_bar(self, settings):
""" Adds a color bar to the VTK scene """
# Generate and customize lookuptable
lut = vtkLookupTable()
lut.SetHueRange(1/3.0, 0.0) # From green to red
lut.SetSaturationRange(1.0, 1.0)
lut.SetValueRange(1.0, 1.0)
lut.SetAlphaRange(1.0, 1.0)
lut.SetNumberOfColors(settings.n_col_aRMSD)
lut.SetRange(0.0, settings.max_RMSD_diff) # Labels from 0.0 to max_RMSD
lut.Build() # Build the table
# Create the scalar_bar
scalar_bar = vtkScalarBarActor()
scalar_bar.SetTitle(' ') # Otherwise it causes a string error
scalar_bar.GetProperty().SetColor(0.0, 0.0, 0.0)
scalar_bar.SetLabelFormat('%-#6.2g') # Two digits
scalar_bar.SetNumberOfLabels(8)
scalar_bar.SetLookupTable(lut)
self.ren.AddActor(scalar_bar)
# Create the scalar_bar_widget
#scalar_bar_widget = vtkScalarBarWidget()
#scalar_bar_widget.SetInteractor(self.iren)
#scalar_bar_widget.SetScalarBarActor(scalar_bar)
#scalar_bar_widget.On()
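    # Note (editorial comment, not original code): the lookup table maps values from
    # 0.0 to settings.max_RMSD_diff onto a green-to-red hue range (1/3 .. 0.0) with
    # settings.n_col_aRMSD discrete colors; the scalar bar actor simply displays this
    # scale next to the scene, and the widget variant above is left disabled.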
def add_camera_setting(self, molecule):
""" Adds a camera orientation from the molecule to the VTK object """
self.has_cam_vtk = molecule.has_cam_vtk
self.cam_vtk_pos = molecule.cam_vtk_pos
self.cam_vtk_focal_pt = molecule.cam_vtk_focal_pt
self.cam_vtk_view_up = molecule.cam_vtk_view_up
def make_initial_plot(self, molecule1, molecule2, settings):
""" Calls all functions needed for the initial plot """
self.plot_type = 'initial'
arrow_length = np.max(np.abs(molecule2.cor)) * 1.25 * settings.scale_glob
self.add_all_atoms(molecule1, settings)
self.add_all_bonds_regular(molecule1, settings)
self.add_all_atoms(molecule2, settings)
self.add_all_bonds_regular(molecule2, settings)
self.add_all_bonds_disordered(molecule1, molecule2, settings)
self.add_all_labels(molecule1, settings)
self.add_all_labels(molecule2, settings)
if settings.draw_arrows: # Draw arrows
self.add_arrow('x', arrow_length, settings)
self.add_arrow('y', arrow_length, settings)
self.add_arrow('z', arrow_length, settings)
if settings.draw_labels: # Draw arrow labels
self.add_label([arrow_length, 0.0, 0.0], settings.arrow_col_rgb, 'X')
self.add_label([0.0, arrow_length, 0.0], settings.arrow_col_rgb, 'Y')
self.add_label([0.0, 0.0, arrow_length], settings.arrow_col_rgb, 'Z')
if settings.draw_legend: # Draw legend
self.add_legend(molecule1, molecule2, settings)
self.add_camera_setting(molecule2)
def make_inertia_plot(self, molecule1, molecule2, pa_mol1, pa_mol2, settings):
""" Calls all functions for the inertia tensor plot """
radius = 1.3
self.plot_type = 'inertia'
arrow_length = np.max(np.abs(molecule1.cor)) * 1.25 * settings.scale_glob
arrow_length2 = np.max(np.abs(molecule2.cor)) * 0.65 * settings.scale_glob
self.add_all_atoms(molecule1, settings)
self.add_all_bonds_regular(molecule1, settings)
self.add_all_atoms(molecule2, settings)
self.add_all_bonds_regular(molecule2, settings)
self.add_com(molecule1, radius, settings.col_model_inertia_rgb, settings)
self.add_com(molecule2, radius, settings.col_refer_inertia_rgb, settings)
self.add_principal_axes(molecule1.com, pa_mol1[0], arrow_length2, settings.col_model_inertia_rgb, settings)
self.add_principal_axes(molecule1.com, pa_mol1[1], arrow_length2, settings.col_model_inertia_rgb, settings)
self.add_principal_axes(molecule1.com, pa_mol1[2], arrow_length2, settings.col_model_inertia_rgb, settings)
self.add_principal_axes(molecule2.com, pa_mol2[0], arrow_length2, settings.col_refer_inertia_rgb, settings)
self.add_principal_axes(molecule2.com, pa_mol2[1], arrow_length2, settings.col_refer_inertia_rgb, settings)
self.add_principal_axes(molecule2.com, pa_mol2[2], arrow_length2, settings.col_refer_inertia_rgb, settings)
if settings.draw_arrows: # Draw arrows
self.add_arrow('x', arrow_length, settings)
self.add_arrow('y', arrow_length, settings)
self.add_arrow('z', arrow_length, settings)
self.add_camera_setting(molecule2)
def make_kabsch_plot(self, align, settings):
""" Calls all functions needed for the Kabsch plot """
self.plot_type = 'aRMSD'
self.add_all_atoms(align, settings)
self.add_all_bonds_kabsch(align, settings)
self.add_all_labels(align, settings)
self.add_camera_setting(align)
if settings.use_aRMSD_col and settings.draw_col_map: # If aRMSD colors are requested
self.add_color_bar(settings)
# Connect with picker
self.picker = aRMSD_plot_picker(settings, self.at_actors_list, align)
self.picker.SetDefaultRenderer(self.ren)
self.iren.SetInteractorStyle(self.picker)
def make_substructure_plot(self, align, settings):
""" Calls all functions needed for the substructure selection plot """
self.plot_type = 'substructure'
self.add_all_atoms(align, settings)
self.add_all_bonds_regular(align, settings)
self.add_all_labels(align, settings)
self.add_camera_setting(align)
# Connect with picker
self.picker = aRMSD_substructure_picker(settings, self.at_actors_list,
align, plot_type='substructure', picker_type='normal')
self.picker.SetDefaultRenderer(self.ren)
self.iren.SetInteractorStyle(self.picker)
def make_superpos_plot(self, align, settings):
""" Calls all functions needed for the superposition plot """
self.plot_type = 'superpos'
self.add_all_atoms_superpos(align, settings)
self.add_all_bonds_superpos(align, settings)
self.add_all_labels(align, settings)
if settings.draw_legend: # Draw legend
self.add_legend(align, align, settings)
self.add_camera_setting(align)
def make_fractional_plot(self, xray, settings, picker_type):
""" Calls all functions needed for the fractional coordinates plot """
self.plot_type = 'fractional'
self.add_all_atoms(xray, settings)
self.add_all_bonds_regular(xray, settings)
self.add_camera_setting(xray)
# Connect with picker
self.picker = aRMSD_substructure_picker(settings, self.at_actors_list, xray, self.plot_type, picker_type)
self.picker.SetDefaultRenderer(self.ren)
self.iren.SetInteractorStyle(self.picker)
class Molecular_Viewer_mpl(object):
""" A molecular viewer object based on matplotlib used for 3d plots """
def __init__(self):
""" Initializes the molecular viewer """
self.space = plt.figure() # Define plotting space and axes
self.axes = self.space.add_subplot(111)
self.axes.grid(False) # Switch off grid
self.axes.axis('off') # Switch off axis
self.n_plots = 1
def colorbar_plot(self, align, settings):
""" Contains all functions for the Kabsch plot in aRMSD representation """
# Set up color map, bounds for colorbar (rounded to second digit) and normalize boundary
cmap = mpl.colors.ListedColormap(align.plt_col_aRMSD)
spacing = 0.1
# Adjust the colorbar spacing for small and large RMSD distributions
if settings.max_RMSD_diff < 0.5:
spacing = 0.05
if settings.max_RMSD_diff >= 2.0:
spacing = 0.2
# 0.0 to settings.max_RMSD_diff with given spacing
bounds = np.around(np.arange(0.0, settings.max_RMSD_diff+0.1, spacing), 2)
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
# Create a second axes for the colorbar
self.axes2 = self.space.add_axes([0.88, 0.1, 0.03, 0.8]) # Global values (do not change)
# Create colorbar
mpl.colorbar.ColorbarBase(self.axes2, cmap=cmap, norm=norm,
spacing='proportional', ticks=bounds, boundaries=bounds)
# Set y label and label size
self.axes2.set_ylabel(r'RMSD / $\AA$', size=12)
self.space.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.0) # Set margins of the plot window
plt.show() # Show result
class Statistics_mpl(object):
""" A statistics object based on matplotlib used for 2d plots """
def __init__(self):
""" Initializes the main window via gridspec and defines plotting colors """
# Define subplot locations and set titles and grids
self.gs = gridspec.GridSpec(3,3, left=0.08, bottom=0.08, right=0.96, top=0.92, wspace=0.30, hspace=0.97)
self.ax1 = plt.subplot(self.gs[0, :-1])
self.ax2 = plt.subplot(self.gs[1:, :-1])
self.ax3 = plt.subplot(self.gs[0:, -1])
def plot(self):
""" Plots result """
mng = plt.get_current_fig_manager() # Open directly in full window
if mpl.get_backend() == 'Qt4Agg': # 'Qt4' backend
mng.window.showMaximized()
elif mpl.get_backend() == 'WxAgg': # 'WxAgg' backend
mng.frame.Maximize(True)
elif mpl.get_backend() == 'TkAgg': # 'TkAgg' backend
mng.window.state('zoomed') # Tk maximization; works on Windows, may differ on other platforms
plt.show() # Show all plots
def linregress(self, x, y):
""" Calculate a least-squares regression for two sets of measurements (taken from scipy) """
eps = 1.0E-20
x, y = np.asarray(x), np.asarray(y)
n = len(x)
xmean, ymean = np.mean(x, None), np.mean(y, None)
# average sum of squares:
ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
r_num = ssxym
r_den = np.sqrt(ssxm * ssym)
if r_den == 0.0:
r = 0.0
else:
r = r_num / r_den
# test for numerical error propagation
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
df = n - 2
t = r * np.sqrt(df / ((1.0 - r + eps)*(1.0 + r + eps)))
slope = r_num / ssxm
intercept = ymean - slope*xmean
sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
return slope, intercept, r, sterrest
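# Editorial note (not in the original source): this helper mirrors
# scipy.stats.linregress: the slope is cov(x, y) / var(x), the intercept is
# mean(y) - slope * mean(x), and r is clipped to [-1, 1] to guard against
# round-off; the t-statistic is computed but not returned.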
def do_stats_quant(self, align, logger, settings, prop='bond_dist'):
""" Wrapper for the calculation and plotting of individual statistic evaluations """
# Details for the handling of the different quantities
if prop == 'bond_dist':
data_mol1 = align.bnd_dis_mol1
data_mol2 = align.bnd_dis_mol2
plot_color = settings.new_red
plt_axis = self.ax1
title_prefix = 'All Bond Distances:'
label_suffix = ' distances'
label_unit = r' $\AA$'
extra_space = 0.2
# Do actual statistics for the two data sets
m, b, r, sterrest = self.linregress(data_mol2, data_mol1)
limits, x_axis, rmsd = self.prep_simulation(data_mol2, data_mol1, settings)
logger.prop_bnd_dist_rmsd, logger.prop_bnd_dist_r_sq = rmsd, r**2 # Log quality descriptors
elif prop == 'bond_dist_types':
# Mean values
data_mol1 = np.asarray([np.mean(align.bnd_dis_mol1[align.bnd_type_pos[entry]])
for entry in range(align.n_bnd_types)])
data_mol2 = np.asarray([np.mean(align.bnd_dis_mol2[align.bnd_type_pos[entry]])
for entry in range(align.n_bnd_types)])
# Calculate error
if settings.error_prop == 'std': # Standard deviations
error_prop1 = np.asarray([np.std(align.bnd_dis_mol1[align.bnd_type_pos[entry]])
for entry in range(align.n_bnd_types)])
error_prop2 = np.asarray([np.std(align.bnd_dis_mol2[align.bnd_type_pos[entry]])
for entry in range(align.n_bnd_types)])
elif settings.error_prop == 'var': # Variances
error_prop1 = np.asarray([np.var(align.bnd_dis_mol1[align.bnd_type_pos[entry]])
for entry in range(align.n_bnd_types)])
error_prop2 = np.asarray([np.var(align.bnd_dis_mol2[align.bnd_type_pos[entry]])
for entry in range(align.n_bnd_types)])
plot_color = settings.new_red
plt_axis = self.ax2
title_prefix = 'Average Distances per Bond Type:'
label_suffix = ' distance types'
label_unit = r' $\AA$'
extra_space = 0.1 + np.max(np.hstack((error_prop1, error_prop2))) # Additional extra space for markers
# Do actual statistics for the two data sets
m, b, r, sterrest = self.linregress(data_mol2, data_mol1)
limits, x_axis, rmsd = self.prep_simulation(data_mol2, data_mol1, settings)
logger.prop_bnd_dist_type_rmsd, logger.prop_bnd_dist_type_r_sq = rmsd, r**2 # Log quality descriptors
if align.n_bnd_types <= 2:
logger.pt_warning_bond_types() # Warn user if 2 or fewer bond types were found
elif prop == 'angles':
data_mol1 = align.ang_deg_mol1
data_mol2 = align.ang_deg_mol2
plot_color = settings.new_green
plt_axis = self.ax3
title_prefix = 'All Angles:'
label_suffix = ' angles'
label_unit = r' $^\circ$'
extra_space = 3.5
# Do actual statistics for the two data sets
m, b, r, sterrest = self.linregress(data_mol2, data_mol1)
limits, x_axis, rmsd = self.prep_simulation(data_mol2, data_mol1, settings)
logger.prop_ang_rmsd, logger.prop_ang_r_sq = rmsd, r**2 # Log quality descriptors
elif prop == 'torsions':
data_mol1 = align.tor_deg_mol1
data_mol2 = align.tor_deg_mol2
plot_color = settings.new_blue
plt_axis = self.ax3
title_prefix = 'All Angles / Dihedrals:'
label_suffix = ' dihedrals'
label_unit = r' $^\circ$'
extra_space = 3.5
# Do actual statistics for the two data sets
m, b, r, sterrest = self.linregress(data_mol2, data_mol1)
limits, x_axis, rmsd = self.prep_simulation(data_mol2, data_mol1, settings)
logger.prop_tor_rmsd, logger.prop_tor_r_sq = rmsd, r**2 # Log quality descriptors
# Generate all titles and labels
ax_title = title_prefix + ' RMSE = ' + str(np.around(rmsd, settings.calc_prec_stats)) + label_unit
xlabel = align.name2+' /' + label_unit
ylabel = align.name1+' /' + label_unit
plt_axis.set_title(ax_title, fontsize=settings.title_pt)
plt_axis.set_xlabel(xlabel, fontsize=settings.ax_pt, style='italic')
plt_axis.set_ylabel(ylabel, fontsize=settings.ax_pt, style='italic')
plt_axis.grid(False)
label_data = str(len(data_mol2)) + label_suffix
label_fit = r'R$^2$ = '+str(np.around(r**2, settings.calc_prec_stats))
log_rmsd, log_r_sq = rmsd, r**2 # Log quality of correlation
# Plot linear correlation and fit / adjust axes limits
if prop == 'bond_dist_types':
plt_axis.errorbar(data_mol2, data_mol1, xerr=error_prop2, yerr=error_prop1, fmt="o",
ms=8.5, mfc=plot_color, mew=0.75, zorder=2, mec=plot_color, label=label_data)
[plt_axis.text(data_mol2[pos], data_mol1[pos] - 0.1,
align.bnd_label[pos], zorder=3, fontsize=13) for pos in range(align.n_bnd_types)]
add_lim = np.asarray([-0.1, 0.1], dtype=float) # np.float is deprecated in newer numpy
limits += add_lim
else:
plt_axis.plot(data_mol2, data_mol1, "o", ms=8.5, mfc=plot_color, mew=0.75,
zorder=1, mec=plot_color, label=label_data)
plt_axis.plot(x_axis, m*x_axis+b, lw=2, zorder=1, color=plot_color, label=label_fit)
plt_axis.set_xlim([limits[0] - extra_space, limits[1] + extra_space])
plt_axis.set_ylim([limits[0] - extra_space, limits[1] + extra_space])
# Draw legend and add grid upon request
if settings.stats_draw_legend:
plt_axis.legend(loc=settings.legend_pos, frameon=False)
if settings.stats_draw_grid:
plt_axis.grid()
def prep_simulation(self, data1, data2, settings):
""" Calculates the RMSE of two data sets and generates axis for linear regression """
# Determine lowest and highest values of the combined data
stack = np.hstack((data1, data2))
limits = [np.min(stack), np.max(stack)]
# Calculate the RMSE of the two data sets
rmsd = np.around(np.sqrt(np.sum(np.abs(data2 - data1)**2 / len(data2))), 4)
# Determine step size and axis
step_size_all = ((limits[1] + settings.splitter) - (limits[0] - settings.splitter)) / len(data2)
axis = np.arange(limits[0] - settings.splitter, limits[1] + settings.splitter, step_size_all)
return limits, axis, rmsd
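# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original aRMSD source): a minimal check of
# the statistics helpers defined above. It assumes the module-level imports of
# numpy (np) and matplotlib (mpl, plt, gridspec) made earlier in this file;
# the dummy settings object and the toy distances are invented for
# illustration only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class _DemoSettings(object):
        """ Hypothetical stand-in for the real aRMSD settings object """
        splitter = 0.1  # Axis padding used by prep_simulation
    stats = Statistics_mpl()
    ref_dist = np.array([1.50, 1.52, 1.54, 2.10])  # e.g. reference bond distances
    mod_dist = np.array([1.49, 1.53, 1.55, 2.08])  # e.g. model bond distances
    slope, intercept, r, sterr = stats.linregress(ref_dist, mod_dist)
    limits, x_axis, rmsd = stats.prep_simulation(ref_dist, mod_dist, _DemoSettings())
    print('slope = %.3f, R^2 = %.3f, RMSE = %.4f' % (slope, r ** 2, rmsd))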
| mit |
theoryno3/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 18 | 34542 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
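# Editorial note: MACHINE_EPSILON is about 2.22e-16 for IEEE double precision;
# the default nugget used below (10. * MACHINE_EPSILON) adds just enough to the
# diagonal of the correlation matrix to keep its Cholesky factorization
# numerically robust.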
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated with the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
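# Worked example (editorial, not in the original source): for
# X = [[0.], [1.], [3.]] the unordered sample pairs are (0, 1), (0, 2) and
# (1, 2), so l1_cross_distances(X) returns D = [[1.], [3.], [2.]] and
# ij = [[0, 1], [0, 2], [1, 2]].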
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_eval, ) or (n_eval, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval // batch_size)): # floor division keeps this an int under Python 3
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval // batch_size)): # floor division keeps this an int under Python 3
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = np.log10(self.thetaL) \
+ self.random_state.rand(self.theta0.size).reshape(
self.theta0.shape) * np.log10(self.thetaU
/ self.thetaL)
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = check_array(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = check_array(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = check_array(self.thetaL)
self.thetaU = check_array(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
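# ---------------------------------------------------------------------------
# Editor's sketch (not part of the scikit-learn source): minimal usage of the
# GaussianProcess class above, extending the docstring example to also request
# the mean squared error of the prediction. Data and hyperparameter values are
# illustrative only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
    y = (X * np.sin(X)).ravel()
    gp = GaussianProcess(regr='constant', corr='squared_exponential',
                         theta0=1e-1, thetaL=1e-3, thetaU=1.,
                         random_start=5)
    gp.fit(X, y)
    x_new = np.atleast_2d(np.linspace(0., 10., 20)).T
    y_pred, mse = gp.predict(x_new, eval_MSE=True)
    print(y_pred[:3], np.sqrt(mse[:3]))  # BLUP values and their uncertainty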
| bsd-3-clause |