# ----------------------------------------------------------------------------
# File: tensorflow/examples/tutorials/input_fn/boston.py
# Repo: ravindrapanda/tensorflow (license: apache-2.0)
# ----------------------------------------------------------------------------
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def get_input_fn(data_set, num_epochs=None, shuffle=True):
return tf.estimator.inputs.pandas_input_fn(
x=pd.DataFrame({k: data_set[k].values for k in FEATURES}),
y=pd.Series(data_set[LABEL].values),
num_epochs=num_epochs,
shuffle=shuffle)
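# Illustrative usage, mirroring the calls made in main() below:
#   get_input_fn(training_set)                           # shuffled, repeats indefinitely
#   get_input_fn(test_set, num_epochs=1, shuffle=False)  # single ordered pass (evaluation)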
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.feature_column.numeric_column(k) for k in FEATURES]
  # Build a 2-layer fully connected DNN with 10 units in each layer.
regressor = tf.estimator.DNNRegressor(feature_columns=feature_cols,
hidden_units=[10, 10],
model_dir="/tmp/boston_model")
# Train
regressor.train(input_fn=get_input_fn(training_set), steps=5000)
# Evaluate loss over one epoch of test_set.
ev = regressor.evaluate(
input_fn=get_input_fn(test_set, num_epochs=1, shuffle=False))
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions over a slice of prediction_set.
y = regressor.predict(
input_fn=get_input_fn(prediction_set, num_epochs=1, shuffle=False))
# .predict() returns an iterator of dicts; convert to a list and print
# predictions
predictions = list(p["predictions"] for p in itertools.islice(y, 6))
print("Predictions: {}".format(str(predictions)))
if __name__ == "__main__":
tf.app.run()
# ----------------------------------------------------------------------------
# File: sklearn/cluster/tests/test_birch.py
# Repo: glouppe/scikit-learn (license: bsd-3-clause)
# ----------------------------------------------------------------------------
"""
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
    # Test that the predict method assigns each sample to the nearest
    # subcluster centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
    # n_clusters * n_samples_per_cluster = 30
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
    # Test that passing an AgglomerativeClustering instance as n_clusters
    # gives the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
    # Test that nodes have at most branching_factor subclusters
X, y = make_blobs()
branching_factor = 9
    # Purposefully set a low threshold to maximize the number of subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
    # Test that the radius of each leaf subcluster is at most the threshold
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
# ----------------------------------------------------------------------------
# File: scipy/stats/stats.py
# Repo: dominicelse/scipy
# ----------------------------------------------------------------------------
# Copyright 2002 Gary Strangman. All rights reserved
# Copyright 2002-2016 The SciPy Developers
#
# The original code from Gary Strangman was heavily adapted for
# use in SciPy by Travis Oliphant. The original code came with the
# following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
"""
A collection of basic statistical functions for python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
sem
zmap
zscore
iqr
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
threshold
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
weightedtau
linregress
theilslopes
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
Probability Calculations
------------------------
.. autosummary::
:toctree: generated/
chisqprob
betai
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
f_value
Support Functions
-----------------
.. autosummary::
:toctree: generated/
ss
square_of_sums
rankdata
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
import numpy as np
from numpy import array, asarray, ma, zeros
from scipy._lib.six import callable, string_types
from scipy._lib._version import NumpyVersion
import scipy.special as special
import scipy.linalg as linalg
from . import distributions
from . import mstats_basic
from ._distn_infrastructure import _lazywhere
from ._stats_mstats_common import _find_repeats, linregress, theilslopes
from ._stats import _kendall_dis, _toint64, _weightedrankedtau
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore', 'histogram',
'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
'signaltonoise', 'sem', 'zmap', 'zscore', 'iqr', 'threshold',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'weightedtau',
'linregress', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'chisqprob', 'betai',
'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
'ss', 'square_of_sums', 'fastsort', 'rankdata',
'combine_pvalues', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
        # Call np.sum to avoid materializing a huge boolean array in memory,
        # as np.isnan(a).any() would.
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
        # If the check cannot be properly performed we fall back to omitting
# nan values and raising a warning. This can happen when attempting to
# sum things that are not numbers (e.g. as in the function `mode`).
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly checked for nan "
"values. nan values will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return (contains_nan, nan_policy)
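# Illustrative behaviour: _contains_nan(np.array([1.0, np.nan])) returns
# (True, 'propagate'), while a non-numeric input such as np.array(['a', 'b'])
# hits the TypeError branch above and returns (False, 'omit') with a
# RuntimeWarning.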
def gmean(a, axis=0, dtype=None):
"""
Compute the geometric mean along the specified axis.
Returns the geometric average of the array elements.
That is: n-th root of (x1 * x2 * ... * xn)
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
gmean : ndarray
see dtype parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
    Use masked arrays to ignore any non-finite values in the input or those
    that arise in the calculations, such as Not a Number (nan) and infinity;
    masked arrays automatically mask any non-finite values.
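    Examples
    --------
    For example, the geometric mean of 1 and 4 is ``sqrt(1 * 4) = 2``:

    >>> from scipy import stats
    >>> stats.gmean([1, 4])
    2.0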
"""
if not isinstance(a, np.ndarray):
# if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype:
# Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
return np.exp(log_a.mean(axis=axis))
def hmean(a, axis=0, dtype=None):
"""
Calculates the harmonic mean along the specified axis.
That is: n / (1/x1 + 1/x2 + ... + 1/xn)
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
hmean : ndarray
see `dtype` parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
    Use masked arrays to ignore any non-finite values in the input or those
    that arise in the calculations, such as Not a Number (nan) and infinity.
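    Examples
    --------
    For example, the harmonic mean of 1, 4 and 4 is
    ``3 / (1/1 + 1/4 + 1/4) = 2``:

    >>> from scipy import stats
    >>> stats.hmean([1, 4, 4])
    2.0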
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
if np.all(a > 0):
        # Harmonic mean is only defined if all elements are greater than zero
if isinstance(a, np.ma.MaskedArray):
size = a.count(axis)
else:
if axis is None:
a = a.ravel()
size = a.shape[0]
else:
size = a.shape[axis]
return size / np.sum(1.0/a, axis=axis, dtype=dtype)
else:
raise ValueError("Harmonic mean only defined if all elements greater than zero")
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0, nan_policy='propagate'):
"""
Returns an array of the modal (most common) value in the passed array.
If there is more than one such value, only the smallest is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
(array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
(array([3]), array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return ModeResult(np.array([]), np.array([]))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return ModeResult(mostfrequent, oldcounts)
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
        Axis along which to operate. Default is None.
Returns
-------
tmean : float
See also
--------
trim_mean : returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = _mask_to_limits(a.ravel(), limits, inclusive)
return am.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed variance
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float).ravel()
if limits is None:
n = len(a)
return a.var() * n/(n-1.)
am = _mask_to_limits(a, limits, inclusive)
return np.ma.var(am, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed minimum
    This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
array of values
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmin : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed maximum
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
array of values
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmax : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed sample standard deviation
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
r"""
Calculates the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of points.
It is often used to calculate coefficients of skewness and kurtosis due
to its close relationship with them.
Parameters
----------
a : array_like
data
moment : int or array_like of ints, optional
order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
        observations; no degrees of freedom correction is done.
See also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] http://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
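    Examples
    --------
    For example, the second central moment of ``[1, 2, 3, 4, 5]`` (the biased
    variance) is ``((-2)**2 + (-1)**2 + 0**2 + 1**2 + 2**2) / 5 = 2``:

    >>> from scipy import stats
    >>> stats.moment([1, 2, 3, 4, 5], moment=2)
    2.0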
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if a.size == 0:
# empty array, return nan(s) with shape matching `moment`
if np.isscalar(moment):
return np.nan
else:
return np.ones(np.asarray(moment).shape, dtype=np.float64) * np.nan
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mmnt = [_moment(a, i, axis) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
def _moment(a, moment, axis):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0:
# When moment equals 0, the result is 1, by definition.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.ones(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return 1.0
elif moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
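        # (e.g. moment=5 gives n_list == [5, 2]: start from x**2, square it
        # to get x**4, then, since 5 is odd, multiply once more by x below)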
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n-1)/2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
def variation(a, axis=0, nan_policy='propagate'):
"""
Computes the coefficient of variation, the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
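    Examples
    --------
    For the values 2 and 6 the biased standard deviation is 2 and the mean
    is 4, so the variation is 0.5:

    >>> from scipy import stats
    >>> stats.variation([2, 6])
    0.5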
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.variation(a, axis)
return a.std(axis) / a.mean(axis)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
"""
Computes the skewness of a data set.
For normally distributed data, the skewness should be about 0. A skewness
    value > 0 means that there is more weight in the right tail of the
distribution. The function `skewtest` can be used to determine if the
skewness value is close enough to 0, statistically speaking.
Parameters
----------
a : ndarray
data
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
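    Examples
    --------
    A sample that is symmetric about its mean has zero skewness:

    >>> from scipy import stats
    >>> stats.skew([1, 2, 3])
    0.0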
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
zero = (m2 == 0)
vals = _lazywhere(~zero, (m2, m3),
lambda m2, m3: m3 / m2**1.5,
0.)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""
Computes the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
    eliminate bias coming from biased moment estimators.
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
data for which the kurtosis is calculated
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
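    Examples
    --------
    When all values are equal both central moments are zero, and the Fisher
    kurtosis takes its degenerate value of -3 (see Returns above):

    >>> from scipy import stats
    >>> stats.kurtosis([1, 1, 1, 1])
    -3.0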
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
n = a.shape[axis]
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
zero = (m2 == 0)
olderr = np.seterr(all='ignore')
try:
vals = np.where(zero, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 3) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
if fisher:
return vals - 3
else:
return vals
DescribeResult = namedtuple('DescribeResult',
('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""
Computes several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
nobs : int
Number of observations (length of data along `axis`).
minmax: tuple of ndarrays or floats
Minimum and maximum value of data array.
mean : ndarray or float
Arithmetic mean of data along axis.
variance : ndarray or float
Unbiased variance of the data along axis, denominator is number of
observations minus one.
skewness : ndarray or float
Skewness, based on moment calculations with denominator equal to
the number of observations, i.e. no degrees of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher). The kurtosis is normalized so that it is
zero for the normal distribution. No degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.1666666666666661,
skewness=0.0, kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([ 2., 3.]), variance=array([ 2., 2.]),
skewness=array([ 0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate'):
"""
Tests whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
a 2-sided p-value for the hypothesis test
Notes
-----
The sample size must be at least 8.
References
----------
.. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr.,
"A suggestion for using powerful and informative tests of
normality", American Statistician 44, pp. 316-321, 1990.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = float(a.shape[axis])
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a dataset has normal kurtosis
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
Parameters
----------
a : array
array of the sample data
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The 2-sided p-value for the hypothesis test
Notes
-----
    Valid only for n>=20. The Z-score is set to 0 for bad entries.
This function uses the method described in [1]_.
References
----------
.. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis)
n = float(a.shape[axis])
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1
x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4
# [1]_ Eq. 2:
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
# [1]_ Eq. 3:
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
denom = np.where(denom < 0, 99, denom)
term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))
Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 5
Z = np.where(denom == 99, 0, Z)
if Z.ndim == 0:
Z = Z[()]
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the data to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size", Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from
normality", Biometrika, 60, 613-622
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
def jarque_bera(x):
"""
Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
6 Econometric Letters 255-259.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(987654321)
>>> x = np.random.normal(0, 1, 100000)
>>> y = np.random.rayleigh(1, 100000)
>>> stats.jarque_bera(x)
(4.7165707989581342, 0.09458225503041906)
>>> stats.jarque_bera(y)
(6713.7098548143422, 0.0)
"""
x = np.asarray(x)
n = float(x.size)
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
def itemfreq(a):
"""
Returns a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""
Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
the value of `interpolation`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
- fraction: ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``.
- lower: ``i``.
- higher: ``j``.
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
This function will become obsolete in the future.
For Numpy 1.9 and higher, `numpy.percentile` provides all the functionality
that `scoreatpercentile` provides. And it's significantly faster.
Therefore it's recommended to use `numpy.percentile` for users that have
numpy >= 1.9.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted, i, interpolation_method, axis)
for i in per]
return np.array(score)
if (per < 0) or (per > 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
idx = per / 100. * (sorted.shape[axis] - 1)
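    # e.g. per=50 over 100 sorted values gives idx = 0.5 * 99 = 49.5, so the
    # code below averages sorted[49] and sorted[50] with weights 0.5 each
    # (this is how the scoreatpercentile example above yields 49.5)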
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
"""
The percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
This optional parameter specifies the interpretation of the
resulting score:
- "rank": Average percentage ranking of score. In case of
multiple matches, average the percentage rankings of
all matching scores.
- "weak": This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80%
means that 80% of values are less than or equal
to the provided score.
- "strict": Similar to "weak", except that only values that are
strictly less than the given score are counted.
- "mean": The average of the "weak" and "strict" scores, often used in
testing. See
http://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
a = np.array(a)
n = len(a)
if kind == 'rank':
if not np.any(a == score):
a = np.append(a, score)
a_len = np.array(list(range(len(a))))
else:
a_len = np.array(list(range(len(a)))) + 1.0
a = np.sort(a)
idx = [a == score]
pct = (np.mean(a_len[idx]) / n) * 100.0
return pct
elif kind == 'strict':
return np.sum(a < score) / float(n) * 100
elif kind == 'weak':
return np.sum(a <= score) / float(n) * 100
elif kind == 'mean':
return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
@np.deprecate(message=("scipy.stats.histogram2 is deprecated in scipy 0.16.0; "
"use np.histogram2d instead"))
def histogram2(a, bins):
"""
Compute histogram using divisions in bins.
Count the number of times values from array `a` fall into
    numerical ranges defined by `bins`. Range x is given by
    bins[x] <= range_x < bins[x+1] where x = 0, ..., N-2 and N is the
    length of the `bins` array. The last range is given by
    bins[N-1] <= range_{N-1} < infinity. Values less than bins[0] are
    not included in the histogram.
Parameters
----------
a : array_like of rank 1
The array of values to be assigned into bins
bins : array_like of rank 1
Defines the ranges of values to use during histogramming.
Returns
-------
histogram2 : ndarray of rank 1
Each value represents the occurrences for a given bin (range) of
values.
"""
# comment: probably obsoleted by numpy.histogram()
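    # e.g. a = [1, 2, 3, 4], bins = [2, 3]: searchsorted gives n = [1, 2],
    # concatenating len(a) gives [1, 2, 4], and the differences [1, 2] count
    # one value (2) in [2, 3) and two values (3, 4) in [3, infinity)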
n = np.searchsorted(np.sort(a), bins)
n = np.concatenate([n, [len(a)]])
return n[1:] - n[:-1]
HistogramResult = namedtuple('HistogramResult',
('count', 'lowerlimit', 'binsize', 'extrapoints'))
@np.deprecate(message=("scipy.stats.histogram is deprecated in scipy 0.17.0; "
"use np.histogram instead"))
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
# _histogram is used in relfreq/cumfreq, so need to keep it
res = _histogram(a, numbins=numbins, defaultlimits=defaultlimits,
weights=weights, printextras=printextras)
return res
def _histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
"""
Separates the range into several bins and returns the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
This histogram is based on numpy's histogram but has a larger range by
    default if `defaultlimits` is not set.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
    # hist is not always float; convert to stay consistent with the old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
CumfreqResult = namedtuple('CumfreqResult',
('cumcount', 'lowerlimit', 'binsize',
'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
    cumhist = np.cumsum(h, axis=0)
return CumfreqResult(cumhist, l, b, e)
RelfreqResult = namedtuple('RelfreqResult',
('frequency', 'lowerlimit', 'binsize',
'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total of observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
        values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
h = h / float(a.shape[0])
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""
Computes the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
return np.array(arrays)
@np.deprecate(message="scipy.stats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(a, axis=0, ddof=0):
"""
The signal-to-noise ratio of the input data.
Returns the signal-to-noise ratio of `a`, here defined as the mean
divided by the standard deviation.
Parameters
----------
a : array_like
An array_like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction for standard deviation. Default is 0.
Returns
-------
s2n : ndarray
The mean to standard deviation ratio(s) along `axis`, or 0 where the
standard deviation is 0.
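
    Examples
    --------
    A minimal sketch (the ratio is ``mean / std``; here the mean is 2.0
    and the population standard deviation is ``sqrt(2/3)``):

    >>> from scipy import stats
    >>> round(float(stats.signaltonoise([1., 2., 3.])), 4)
    2.4495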
"""
a = np.asanyarray(a)
m = a.mean(axis)
sd = a.std(axis=axis, ddof=ddof)
return np.where(sd == 0, 0, m/sd)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""
Calculates the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
    The default value for `ddof` is different from the default (0) used by
    other ddof-containing routines, such as np.std and np.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
def zscore(a, axis=0, ddof=0):
"""
Calculates the z score of each value in the sample, relative to the
sample mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of
input array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091,
... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom
(``ddof=1``) to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
"""
a = np.asanyarray(a)
mns = a.mean(axis=axis)
sstd = a.std(axis=axis, ddof=ddof)
if axis and mns.ndim < a.ndim:
return ((a - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (a - mns) / sstd
def zmap(scores, compare, axis=0, ddof=0):
"""
Calculates the relative z-scores.
Returns an array of z-scores, i.e., scores that are standardized to
zero mean and unit variance, where mean and variance are calculated
from the comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
scores, compare = map(np.asanyarray, [scores, compare])
mns = compare.mean(axis=axis)
sstd = compare.std(axis=axis, ddof=ddof)
if axis and mns.ndim < compare.ndim:
return ((scores - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (scores - mns) / sstd
# Private dictionary initialized only once at module level
# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
_scale_conversions = {'raw': 1.0,
'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
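# Note: 2 * sqrt(2) * erfinv(0.5) ~= 1.349 is the IQR of a standard normal
# distribution, so scale='normal' turns an IQR into a consistent estimate of
# the standard deviation for normally distributed data.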
def iqr(x, axis=None, rng=(25, 75), scale='raw', nan_policy='propagate',
interpolation='linear', keepdims=False):
"""
Compute the interquartile range of the data along the specified
axis.
The interquartile range (IQR) is the difference between the 75th and
25th percentile of the data. It is a measure of the dispersion
similar to standard deviation or variance, but is much more robust
against outliers [2]_.
The ``rng`` parameter allows this function to compute other
percentile ranges than the actual IQR. For example, setting
``rng=(0, 100)`` is equivalent to `numpy.ptp`.
The IQR of an empty array is `np.nan`.
.. versionadded:: 0.18.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the range is computed. The default is to
compute the IQR for the entire array.
    rng : Two-element sequence containing floats in range of [0,100], optional
Percentiles over which to compute the range. Each must be
between 0 and 100, inclusive. The default is the true IQR:
`(25, 75)`. The order of the elements is not important.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The following string values are recognized:
'raw' : No scaling, just return the raw IQR.
'normal' : Scale by :math:`2 \\sqrt{2} erf^{-1}(\\frac{1}{2}) \\approx 1.349`.
The default is 'raw'. Array-like scale is also allowed, as long
as it broadcasts correctly to the output such that
``out / scale`` is a valid operation. The output dimensions
depend on the input array, `x`, the `axis` argument, and the
`keepdims` flag.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate'
returns nan, 'raise' throws an error, 'omit' performs the
calculations ignoring nan values. Default is 'propagate'.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional
Specifies the interpolation method to use when the percentile
boundaries lie between two data points `i` and `j`:
* 'linear' : `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* 'lower' : `i`.
* 'higher' : `j`.
* 'nearest' : `i` or `j` whichever is nearest.
* 'midpoint' : `(i + j) / 2`.
Default is 'linear'.
keepdims : bool, optional
If this is set to `True`, the reduced axes are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array `x`.
Returns
-------
iqr : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var
Examples
--------
>>> from scipy.stats import iqr
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> iqr(x)
4.0
>>> iqr(x, axis=0)
array([ 3.5, 2.5, 1.5])
>>> iqr(x, axis=1)
array([ 3., 1.])
>>> iqr(x, axis=1, keepdims=True)
array([[ 3.],
[ 1.]])
Notes
-----
This function is heavily dependent on the version of `numpy` that is
installed. Versions greater than 1.11.0b3 are highly recommended, as they
include a number of enhancements and fixes to `numpy.percentile` and
`numpy.nanpercentile` that affect the operation of this function. The
following modifications apply:
Below 1.10.0 : `nan_policy` is poorly defined.
The default behavior of `numpy.percentile` is used for 'propagate'. This
is a hybrid of 'omit' and 'propagate' that mostly yields a skewed
version of 'omit' since NaNs are sorted to the end of the data. A
warning is raised if there are NaNs in the data.
Below 1.9.0: `numpy.nanpercentile` does not exist.
This means that `numpy.percentile` is used regardless of `nan_policy`
and a warning is issued. See previous item for a description of the
behavior.
Below 1.9.0: `keepdims` and `interpolation` are not supported.
The keywords get ignored with a warning if supplied with non-default
values. However, multiple axes are still supported.
References
----------
.. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
.. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
.. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile
"""
x = asarray(x)
# This check prevents percentile from raising an error later. Also, it is
# consistent with `np.var` and `np.std`.
if not x.size:
return np.nan
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, string_types):
scale_key = scale.lower()
if scale_key not in _scale_conversions:
raise ValueError("{0} not a valid scale for `iqr`".format(scale))
scale = _scale_conversions[scale_key]
# Select the percentile function to use based on nans and policy
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan and nan_policy == 'omit':
percentile_func = _iqr_nanpercentile
else:
percentile_func = _iqr_percentile
if len(rng) != 2:
raise TypeError("quantile range must be two element sequence")
rng = sorted(rng)
pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
keepdims=keepdims, contains_nan=contains_nan)
out = np.subtract(pct[1], pct[0])
if scale != 1.0:
out /= scale
return out
def _iqr_percentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False):
"""
Private wrapper that works around older versions of `numpy`.
While this function is pretty much necessary for the moment, it
should be removed as soon as the minimum supported numpy version
allows.
"""
if contains_nan and NumpyVersion(np.__version__) < '1.10.0a':
# I see no way to avoid the version check to ensure that the corrected
# NaN behavior has been implemented except to call `percentile` on a
# small array.
msg = "Keyword nan_policy='propagate' not correctly supported for " \
"numpy versions < 1.10.x. The default behavior of " \
"`numpy.percentile` will be used."
warnings.warn(msg, RuntimeWarning)
try:
# For older versions of numpy, there are two things that can cause a
# problem here: missing keywords and non-scalar axis. The former can be
# partially handled with a warning, the latter can be handled fully by
# hacking in an implementation similar to numpy's function for
# providing multi-axis functionality
# (`numpy.lib.function_base._ureduce` for the curious).
result = np.percentile(x, q, axis=axis, keepdims=keepdims,
interpolation=interpolation)
except TypeError:
if interpolation != 'linear' or keepdims:
            # At time of writing, this means np.__version__ < 1.9.0
warnings.warn("Keywords interpolation and keepdims not supported "
"for your version of numpy", RuntimeWarning)
try:
# Special processing if axis is an iterable
original_size = len(axis)
except TypeError:
# Axis is a scalar at this point
pass
else:
axis = np.unique(np.asarray(axis) % x.ndim)
if original_size > axis.size:
# mimic numpy if axes are duplicated
raise ValueError("duplicate value in axis")
if axis.size == x.ndim:
# axis includes all axes: revert to None
axis = None
elif axis.size == 1:
# no rolling necessary
axis = axis[0]
else:
# roll multiple axes to the end and flatten that part out
for ax in axis[::-1]:
x = np.rollaxis(x, ax, x.ndim)
x = x.reshape(x.shape[:-axis.size] +
(np.prod(x.shape[-axis.size:]),))
axis = -1
result = np.percentile(x, q, axis=axis)
return result
def _iqr_nanpercentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False):
"""
Private wrapper that works around the following:
1. A bug in `np.nanpercentile` that was around until numpy version
1.11.0.
2. A bug in `np.percentile` NaN handling that was fixed in numpy
version 1.10.0.
3. The non-existence of `np.nanpercentile` before numpy version
1.9.0.
While this function is pretty much necessary for the moment, it
should be removed as soon as the minimum supported numpy version
allows.
"""
if hasattr(np, 'nanpercentile'):
        # At time of writing, this means np.__version__ >= 1.9.0
result = np.nanpercentile(x, q, axis=axis,
interpolation=interpolation, keepdims=keepdims)
        # For non-scalar results, older versions of nanpercentile do not
        # roll the reduced axis properly. I see no way of avoiding the
        # version test, since dimensions may just happen to match in the data.
if result.ndim > 1 and NumpyVersion(np.__version__) < '1.11.0a':
axis = np.asarray(axis)
if axis.size == 1:
# If only one axis specified, reduction happens along that dimension
if axis.ndim == 0:
axis = axis[None]
result = np.rollaxis(result, axis[0])
else:
                # If multiple axes, the reduced dimension is last
result = np.rollaxis(result, -1)
else:
msg = "Keyword nan_policy='omit' not correctly supported for numpy " \
"versions < 1.9.x. The default behavior of numpy.percentile " \
"will be used."
warnings.warn(msg, RuntimeWarning)
result = _iqr_percentile(x, q, axis=axis)
return result
#####################################
# TRIMMING FUNCTIONS #
#####################################
@np.deprecate(message="stats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""
Clip array to a given value.
Similar to numpy.clip(), except that values less than `threshmin` or
greater than `threshmax` are replaced by `newval`, instead of by
`threshmin` and `threshmax` respectively.
Parameters
----------
a : array_like
Data to threshold.
threshmin : float, int or None, optional
Minimum threshold, defaults to None.
threshmax : float, int or None, optional
Maximum threshold, defaults to None.
newval : float or int, optional
Value to put in place of values in `a` outside of bounds.
Defaults to 0.
Returns
-------
out : ndarray
The clipped input array, with values less than `threshmin` or
greater than `threshmax` replaced with `newval`.
Examples
--------
>>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8])
>>> from scipy import stats
>>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1)
array([-1, -1, 6, 3, -1, 6, -1, -1, -1, 8])
"""
a = asarray(a).copy()
mask = zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin)
if threshmax is not None:
mask |= (a > threshmax)
a[mask] = newval
return a
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
def sigmaclip(a, low=4., high=4.):
"""
Iterative sigma-clipping of array elements.
    The output array contains only those elements of the input array `a`
that satisfy the conditions ::
mean(c) - std(c)*low < c < mean(c) + std(c)*high
Starting from the full sample, all elements outside the critical range are
removed. The iteration continues with a new critical range until no
elements are outside the range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
    lower : float
        Lower threshold value used for clipping.
    upper : float
        Upper threshold value used for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
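    # Iterate until a full pass removes no further elements.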
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std*low
critupper = c_mean + c_std*high
c = c[(c > critlower) & (c < critupper)]
delta = size - c.size
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
"""
Slices off a proportion of items from both ends of an array.
Slices off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
    Slices off less if proportion results in a non-integer slice index (i.e.,
    conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
Proportion (in range 0-1) of total data set to trim of each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
    return atmp[tuple(sl)]  # index with a tuple; list indexing is deprecated
def trim1(a, proportiontocut, tail='right', axis=0):
"""
Slices off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slices off less if proportion results in a non-integer slice index
    (i.e., conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of 'left' or 'right' of distribution
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
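
    Examples
    --------
    A minimal sketch (trimming 10% of 20 values from the right leaves the
    18 smallest values, in undefined order):

    >>> from scipy import stats
    >>> a = np.arange(20)
    >>> stats.trim1(a, 0.1, tail='right').shape
    (18,)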
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
    if tail.lower() == 'right':
        lowercut = 0
        uppercut = nobs - int(proportiontocut * nobs)
    elif tail.lower() == 'left':
        lowercut = int(proportiontocut * nobs)
        uppercut = nobs
    else:
        raise ValueError("tail must be 'left' or 'right'")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
return atmp[lowercut:uppercut]
def trim_mean(a, proportiontocut, axis=0):
"""
Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
    `proportiontocut`).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of both tails of the distribution
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
    return np.mean(atmp[tuple(sl)], axis=axis)
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
def f_oneway(*args):
"""
Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although
with some loss of power.
    The algorithm is from Heiman [2]_, pp. 394-397.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
.. [3] McDonald, G. H. "Handbook of Biological Statistics", One-way ANOVA.
http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> import scipy.stats as stats
    Here are some data [3]_ on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
(7.1210194716424473, 0.00028122423145345439)
"""
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
alldata = np.concatenate(args)
bign = len(alldata)
# Determine the mean of the data, and subtract that from all inputs to a
    # variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
# to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean()
alldata -= offset
sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign))
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset) / float(len(a))
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= (_square_of_sums(alldata) / float(bign))
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
return F_onewayResult(f, prob)
def pearsonr(x, y):
"""
Calculates a Pearson correlation coefficient and the p-value for testing
non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed, and not necessarily zero-mean.
Like other correlation coefficients, this one varies between -1 and +1
with 0 implying no correlation. Correlations of -1 or +1 imply an exact
linear relationship. Positive correlations imply that as x increases, so
does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : (N,) array_like
Input
y : (N,) array_like
Input
Returns
-------
r : float
Pearson's correlation coefficient
p-value : float
2-tailed p-value
References
----------
http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
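
    Examples
    --------
    A perfectly decreasing sequence gives an exact correlation of -1 (and,
    by the convention coded below for ``abs(r) == 1``, a p-value of 0):

    >>> from scipy import stats
    >>> stats.pearsonr([1, 2, 3, 4, 5], [5, 4, 3, 2, 1])
    (-1.0, 0.0)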
"""
# x and y should have same length.
x = np.asarray(x)
y = np.asarray(y)
n = len(x)
mx = x.mean()
my = y.mean()
xm, ym = x - mx, y - my
r_num = np.add.reduce(xm * ym)
r_den = np.sqrt(_sum_of_squares(xm) * _sum_of_squares(ym))
r = r_num / r_den
# Presumably, if abs(r) > 1, then it is only some small artifact of floating
# point arithmetic.
r = max(min(r, 1.0), -1.0)
df = n - 2
if abs(r) == 1.0:
prob = 0.0
else:
t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r)))
prob = _betai(0.5*df, 0.5, df/(df+t_squared))
return r, prob
def fisher_exact(table, alternative='two-sided'):
"""Performs a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Which alternative hypothesis to the null hypothesis the test uses.
Default is 'two-sided'.
Returns
-------
oddsratio : float
        This is the prior odds ratio, not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
Notes
-----
The calculated odds ratio is different from the one R uses. This scipy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
For tables with large numbers, the (inexact) chi-square test implemented
in the function `chi2_contingency` can also be used.
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
Atlantic Indian
whales 8 2
sharks 1 5
We use this table to find the p-value:
>>> import scipy.stats as stats
>>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
"""
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1,0] > 0 and c[0,1] > 0:
oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
else:
oddsratio = np.inf
n1 = c[0,0] + c[0,1]
n2 = c[1,0] + c[1,1]
n = c[0,0] + c[1,0]
def binary_search(n, n1, n2, side):
"""Binary search for where to begin lower/upper halves in two-sided
test.
"""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])
elif alternative == 'two-sided':
mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
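        # epsilon is a relative tolerance: pmf values whose relative
        # difference is within 1e-4 are treated as equal, guarding against
        # floating-point noise.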
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0,0] < mode:
plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
if pvalue > 1.0:
pvalue = 1.0
return oddsratio, pvalue
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
"""
Calculates a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
The Spearman correlation is a nonparametric measure of the monotonicity
of the relationship between two datasets. Unlike the Pearson correlation,
the Spearman correlation does not assume that both datasets are normally
distributed. Like other correlation coefficients, this one varies
between -1 and +1 with 0 implying no correlation. Correlations of -1 or
+1 imply an exact monotonic relationship. Positive correlations imply that
as x increases, so does y. Negative correlations imply that as x
increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float or ndarray (2-D square)
Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters). The correlation matrix is square,
        with length equal to the total number of variables (columns or rows)
        in `a` and `b` combined.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
that two sets of data are uncorrelated, has same dimension as rho.
Notes
-----
Changes in scipy 0.8.0: rewrite to add tie-handling, and axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
(0.82078268166812329, 0.088587005313543798)
>>> np.random.seed(1234321)
>>> x2n = np.random.randn(100, 2)
>>> y2n = np.random.randn(100, 2)
>>> stats.spearmanr(x2n)
(0.059969996999699973, 0.55338590803773591)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
(0.059969996999699973, 0.55338590803773591)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> pval
array([[ 0. , 0.55338591, 0.06435364, 0.53617935],
[ 0.55338591, 0. , 0.27592895, 0.80234077],
[ 0.06435364, 0.27592895, 0. , 0.73039992],
[ 0.53617935, 0.80234077, 0.73039992, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
(0.10816770419260482, 0.1273562188027364)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
(0.10816770419260482, 0.1273562188027364)
>>> xint = np.random.randint(10, size=(100, 2))
>>> stats.spearmanr(xint)
(0.052760927029710199, 0.60213045837062351)
"""
a, axisout = _chk_asarray(a, axis)
a_contains_nan, nan_policy = _contains_nan(a, nan_policy)
if a_contains_nan:
a = ma.masked_invalid(a)
if a.size <= 1:
return SpearmanrResult(np.nan, np.nan)
ar = np.apply_along_axis(rankdata, axisout, a)
br = None
if b is not None:
b, axisout = _chk_asarray(b, axis)
b_contains_nan, nan_policy = _contains_nan(b, nan_policy)
if a_contains_nan or b_contains_nan:
b = ma.masked_invalid(b)
if nan_policy == 'propagate':
rho, pval = mstats_basic.spearmanr(a, b, axis)
return SpearmanrResult(rho * np.nan, pval * np.nan)
if nan_policy == 'omit':
return mstats_basic.spearmanr(a, b, axis)
br = np.apply_along_axis(rankdata, axisout, b)
n = a.shape[axisout]
rs = np.corrcoef(ar, br, rowvar=axisout)
olderr = np.seterr(divide='ignore') # rs can have elements equal to 1
try:
# clip the small negative values possibly caused by rounding
# errors before taking the square root
t = rs * np.sqrt(((n-2)/((rs+1.0)*(1.0-rs))).clip(0))
finally:
np.seterr(**olderr)
prob = 2 * distributions.t.sf(np.abs(t), n-2)
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
return SpearmanrResult(rs, prob)
PointbiserialrResult = namedtuple('PointbiserialrResult',
('correlation', 'pvalue'))
def pointbiserialr(x, y):
r"""
Calculates a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value
pvalue : float
2-tailed p-value
Notes
-----
    `pointbiserialr` uses a t-test with ``n-2`` degrees of freedom.
    It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
        r_{pb} = \frac{\overline{Y_{1}} -
                 \overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are number of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
groups. Thus, an independent groups t Test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
       no. 3, pp. 603-607, 1954.
.. [3] http://onlinelibrary.wiley.com/doi/10.1002/9781118445112.stat06227/full
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
"""
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate'):
"""
Calculates Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement. This is the 1945 "tau-b" version of Kendall's
tau [2]_, which can account for ties and which reduces to the 1938 "tau-a"
version [1]_ in absence of ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
initial_lexsort : bool, optional
Unused (deprecated).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'. Note that if the input contains nan,
        'omit' delegates to mstats_basic.kendalltau(), which has a different
implementation.
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
weightedtau : Computes a weighted version of Kendall's tau.
Notes
-----
The definition of Kendall's tau that is used is [2]_::
tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U.
References
----------
.. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika
Vol. 30, No. 1/2, pp. 81-93, 1938.
.. [2] Maurice G. Kendall, "The treatment of ties in ranking problems",
Biometrika Vol. 33, No. 3, pp. 239-251. 1945.
.. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John
Wiley & Sons, 1967.
.. [4] Peter M. Fenwick, "A new data structure for cumulative frequency
tables", Software: Practice and Experience, Vol. 24, No. 3,
pp. 327-336, 1994.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.2827454599327748
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
elif not x.size or not y.size:
return KendalltauResult(np.nan, np.nan) # Return NaN if arrays are empty
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == 'omit' or npy == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.kendalltau(x, y)
if initial_lexsort is not None: # deprecate to drop!
warnings.warn('"initial_lexsort" is gone!')
def count_rank_tie(ranks):
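        # Tie corrections over groups of tied values of size t: returns
        # sum(t*(t-1)/2) (the number of tied pairs), sum(t*(t-1)*(t-2)) and
        # sum(t*(t-1)*(2*t+5)); the latter two feed the variance estimate below.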
cnt = np.bincount(ranks).astype('int64', copy=False)
cnt = cnt[cnt > 1]
return ((cnt * (cnt - 1) // 2).sum(),
(cnt * (cnt - 1.) * (cnt - 2)).sum(),
(cnt * (cnt - 1.) * (2*cnt + 5)).sum())
size = x.size
perm = np.argsort(y) # sort on y and convert y to dense ranks
x, y = x[perm], y[perm]
y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
# stable sort on x and convert x to dense ranks
perm = np.argsort(x, kind='mergesort')
x, y = x[perm], y[perm]
x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
dis = _kendall_dis(x, y) # discordant pairs
obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
cnt = np.diff(np.where(obs)[0]).astype('int64', copy=False)
ntie = (cnt * (cnt - 1) // 2).sum() # joint ties
xtie, x0, x1 = count_rank_tie(x) # ties in x, stats
ytie, y0, y1 = count_rank_tie(y) # ties in y, stats
tot = (size * (size - 1)) // 2
if xtie == tot or ytie == tot:
return KendalltauResult(np.nan, np.nan)
# Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
# = con + dis + xtie + ytie - ntie
con_minus_dis = tot - xtie - ytie + ntie - 2 * dis
tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie)
# Limit range to fix computational errors
tau = min(1., max(-1., tau))
# con_minus_dis is approx normally distributed with this variance [3]_
var = (size * (size - 1) * (2.*size + 5) - x1 - y1) / 18. + (
2. * xtie * ytie) / (size * (size - 1)) + x0 * y0 / (9. *
size * (size - 1) * (size - 2))
pvalue = special.erfc(np.abs(con_minus_dis) / np.sqrt(var) / np.sqrt(2))
    return KendalltauResult(tau, pvalue)
WeightedTauResult = namedtuple('WeightedTauResult', ('correlation', 'pvalue'))
def weightedtau(x, y, rank=True, weigher=None, additive=True):
r"""
Computes a weighted version of Kendall's :math:`\tau`.
The weighted :math:`\tau` is a weighted version of Kendall's
:math:`\tau` in which exchanges of high weight are more influential than
exchanges of low weight. The default parameters compute the additive
hyperbolic version of the index, :math:`\tau_\mathrm h`, which has
been shown to provide the best balance between important and
unimportant elements [1]_.
The weighting is defined by means of a rank array, which assigns a
nonnegative rank to each element, and a weigher function, which
    assigns a weight based on the rank to each element. The weight of an
exchange is then the sum or the product of the weights of the ranks of
the exchanged elements. The default parameters compute
:math:`\tau_\mathrm h`: an exchange between elements with rank
:math:`r` and :math:`s` (starting from zero) has weight
:math:`1/(r+1) + 1/(s+1)`.
Specifying a rank array is meaningful only if you have in mind an
    external criterion of importance. If, as usually happens, you do
not have in mind a specific rank, the weighted :math:`\tau` is
defined by averaging the values obtained using the decreasing
lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the
behavior with default parameters.
Note that if you are computing the weighted :math:`\tau` on arrays of
ranks, rather than of scores (i.e., a larger value implies a lower
rank) you must negate the ranks, so that elements of higher rank are
associated with a larger value.
Parameters
----------
x, y : array_like
Arrays of scores, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
    rank : array_like of ints or bool, optional
A nonnegative rank assigned to each element. If it is None, the
decreasing lexicographical rank by (`x`, `y`) will be used: elements of
higher rank will be those with larger `x`-values, using `y`-values to
break ties (in particular, swapping `x` and `y` will give a different
result). If it is False, the element indices will be used
directly as ranks. The default is True, in which case this
function returns the average of the values obtained using the
decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`).
weigher : callable, optional
The weigher function. Must map nonnegative integers (zero
representing the most important element) to a nonnegative weight.
        The default, None, provides hyperbolic weighting, that is,
rank :math:`r` is mapped to weight :math:`1/(r+1)`.
additive : bool, optional
If True, the weight of an exchange is computed by adding the
weights of the ranks of the exchanged elements; otherwise, the weights
are multiplied. The default is True.
Returns
-------
correlation : float
The weighted :math:`\tau` correlation index.
pvalue : float
        Presently ``np.nan``, as the null distribution of the statistic is
        unknown (even in the additive hyperbolic case).
See also
--------
kendalltau : Calculates Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
This function uses an :math:`O(n \log n)`, mergesort-based algorithm
[1]_ that is a weighted extension of Knight's algorithm for Kendall's
:math:`\tau` [2]_. It can compute Shieh's weighted :math:`\tau` [3]_
between rankings without ties (i.e., permutations) by setting
`additive` and `rank` to False, as the definition given in [1]_ is a
generalization of Shieh's.
NaNs are considered the smallest possible score.
.. versionadded:: 0.19.0
References
----------
.. [1] Sebastiano Vigna, "A weighted correlation index for rankings with
ties", Proceedings of the 24th international conference on World
Wide Web, pp. 1166-1176, ACM, 2015.
.. [2] W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association,
Vol. 61, No. 314, Part 1, pp. 436-439, 1966.
.. [3] Grace S. Shieh. "A weighted Kendall's tau statistic", Statistics &
Probability Letters, Vol. 39, No. 1, pp. 17-24, 1998.
Examples
--------
>>> from scipy import stats
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
>>> p_value
nan
>>> tau, p_value = stats.weightedtau(x, y, additive=False)
>>> tau
-0.62205716951801038
NaNs are considered the smallest possible score:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, np.nan]
>>> tau, _ = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
This is exactly Kendall's tau:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, _ = stats.weightedtau(x, y, weigher=lambda x: 1)
>>> tau
-0.47140452079103173
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> stats.weightedtau(x, y, rank=None)
WeightedTauResult(correlation=-0.4157652301037516, pvalue=nan)
>>> stats.weightedtau(y, x, rank=None)
WeightedTauResult(correlation=-0.71813413296990281, pvalue=nan)
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `weightedtau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
if not x.size:
return WeightedTauResult(np.nan, np.nan) # Return NaN if arrays are empty
# If there are NaNs we apply _toint64()
if np.isnan(np.min(x)):
x = _toint64(x)
if np.isnan(np.min(y)):
y = _toint64(y)
    # Reduce unsupported types to ranks
if x.dtype != y.dtype:
if x.dtype != np.int64:
x = _toint64(x)
if y.dtype != np.int64:
y = _toint64(y)
else:
if x.dtype not in (np.int32, np.int64, np.float32, np.float64):
x = _toint64(x)
y = _toint64(y)
if rank is True:
return WeightedTauResult((
_weightedrankedtau(x, y, None, weigher, additive) +
_weightedrankedtau(y, x, None, weigher, additive)
) / 2, np.nan)
if rank is False:
rank = np.arange(x.size, dtype=np.intp)
elif rank is not None:
rank = np.asarray(rank).ravel()
if rank.size != x.size:
raise ValueError("All inputs to `weightedtau` must be of the same size, "
"found x-size %s and rank-size %s" % (x.size, rank.size))
return WeightedTauResult(_weightedrankedtau(x, y, rank, weigher, additive), np.nan)
#####################################
# INFERENTIAL STATISTICS #
#####################################
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
"""
Calculates the T-test for the mean of ONE group of scores.
This is a two-sided test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
sample observation
popmean : float or array_like
        expected value in null hypothesis; if array_like, it must have the
        same shape as `a` excluding the axis dimension
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Examples
--------
>>> from scipy import stats
>>> np.random.seed(7654567) # fix seed to get the same result
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs,5.0)
(array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674]))
>>> stats.ttest_1samp(rvs,0.0)
(array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999]))
Examples using axis and non-scalar dimension for population mean.
>>> stats.ttest_1samp(rvs,[5.0,0.0])
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1)
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs,[[5.0],[0.0]])
(array([[-0.68014479, -0.04323899],
[ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01],
[ 7.89094663e-03, 1.49986458e-04]]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / float(n))
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return Ttest_1sampResult(t, prob)
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
d = mean1 - mean2
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
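    # Welch-Satterthwaite approximation: effective degrees of freedom for the
    # unequal-variance (Welch) t-test.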
vn1 = v1 / n1
vn2 = v2 / n2
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
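    # Classic two-sample t-test denominator based on the pooled variance.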
df = n1 + n2 - 2.0
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True):
"""
T-test for means of two independent samples from descriptive statistics.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2
std2 : array_like
The standard deviations(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
Returns
-------
statistic : float or array
The calculated t-statistics
pvalue : float or array
The two-tailed p-value.
See also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
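    Examples
    --------
    A minimal usage sketch; the summary statistics below are made up for
    illustration:
    >>> from scipy import stats
    >>> res = stats.ttest_ind_from_stats(mean1=15.0, std1=2.0, nobs1=20,
    ...                                  mean2=14.1, std2=2.5, nobs2=25)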
"""
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df)
return Ttest_indResult(*res)
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
"""
Calculates the T-test for the means of *two independent* samples of scores.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The two-tailed p-value.
Notes
-----
We can use this test, if we observe two independent samples from
the same or different population, e.g. exam scores of boys and
girls or of two ethnic groups. The test measures whether the
average (expected) value differs significantly across samples. If
we observe a large p-value, for example larger than 0.05 or 0.1,
then we cannot reject the null hypothesis of identical average scores.
If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
then we reject the null hypothesis of equal averages.
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> stats.ttest_ind(rvs1,rvs2)
(0.26833823296239279, 0.78849443369564776)
>>> stats.ttest_ind(rvs1,rvs2, equal_var = False)
(0.26833823296239279, 0.78849452749500748)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)
>>> stats.ttest_ind(rvs1, rvs3)
(-0.46580283298287162, 0.64145827413436174)
>>> stats.ttest_ind(rvs1, rvs3, equal_var = False)
(-0.46580283298287162, 0.64149646246569292)
When n1 != n2, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs4)
(-0.99882539442782481, 0.3182832709103896)
>>> stats.ttest_ind(rvs1, rvs4, equal_var = False)
(-0.69712570584654099, 0.48716927725402048)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs5)
(-1.4679669854490653, 0.14263895620529152)
>>> stats.ttest_ind(rvs1, rvs5, equal_var = False)
(-0.94365973617132992, 0.34744170334794122)
"""
a, b, axis = _chk2_asarray(a, b, axis)
# check both a and b
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var)
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
n1 = a.shape[axis]
n2 = b.shape[axis]
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df)
return Ttest_indResult(*res)
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0, nan_policy='propagate'):
"""
Calculates the T-test on TWO RELATED samples of scores, a and b.
This is a two-sided test for the null hypothesis that 2 related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
    Examples of use are scores of the same set of students in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
    example greater than 0.05 or 0.1, then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
References
----------
http://en.wikipedia.org/wiki/T-test#Dependent_t-test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) # fix random seed to get same numbers
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs2)
(0.24101764965300962, 0.80964043445811562)
>>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs3)
(-3.9995108708727933, 7.3082402191726459e-005)
"""
a, b, axis = _chk2_asarray(a, b, axis)
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
m = ma.mask_or(ma.getmask(a), ma.getmask(b))
aa = ma.array(a, mask=m, copy=True)
bb = ma.array(b, mask=m, copy=True)
return mstats_basic.ttest_rel(aa, bb, axis)
if a.shape[axis] != b.shape[axis]:
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
return np.nan, np.nan
n = a.shape[axis]
df = float(n - 1)
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / float(n))
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t)
return Ttest_relResult(t, prob)
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit.
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two-sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str, array or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
cdf : str or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If `rvs` is a string then `cdf` can be False or the same as `rvs`.
If a callable, that callable is used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less','greater'}, optional
Defines the alternative hypothesis (see explanation above).
Default is 'two-sided'.
mode : 'approx' (default) or 'asymp', optional
Defines the distribution used for calculating the p-value.
- 'approx' : use approximation to exact distribution of test statistic
- 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``.
Examples
--------
>>> from scipy import stats
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> stats.kstest('norm', False, N=100)
(0.058352892479417884, 0.88531190944151261)
The above lines are equivalent to:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.norm.rvs(size=100), 'norm')
(0.058352892479417884, 0.88531190944151261)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:
>>> np.random.seed(987654321)
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> stats.kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> stats.kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Don't reject equal distribution against alternative hypothesis: greater
>>> stats.kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, string_types):
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError("if rvs is string, cdf has to be the "
"same distribution")
if isinstance(cdf, string_types):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size': N}
vals = np.sort(rvs(*args, **kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
if alternative in ['two-sided', 'greater']:
Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
if alternative == 'greater':
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative in ['two-sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))
if alternative == 'two-sided':
D = np.max([Dplus, Dmin])
if mode == 'asymp':
return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
if N > 2666 or pval_two > 0.80 - N*0.3/1000:
return KstestResult(D, pval_two)
else:
return KstestResult(D, 2 * distributions.ksone.sf(D, N))
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
Power_divergenceResult = namedtuple('Power_divergenceResult',
('statistic', 'pvalue'))
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""
Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
`lambda_` gives the power in the Cressie-Read power divergence
statistic. The default is 1. For convenience, `lambda_` may be
assigned one of the following strings, in which case the
corresponding numerical value is used::
String Value Description
"pearson" 1 Pearson's chi-squared statistic.
In this case, the function is
equivalent to `stats.chisquare`.
"log-likelihood" 0 Log-likelihood ratio. Also known as
the G-test [3]_.
"freeman-tukey" -1/2 Freeman-Tukey statistic.
"mod-log-likelihood" -1 Modified log-likelihood ratio.
"neyman" -2 Neyman's statistic.
"cressie-read" 2/3 The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", http://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, string_types):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
"are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
if f_exp is not None:
f_exp = np.atleast_1d(np.asanyarray(f_exp))
else:
# Compute the equivalent of
# f_exp = f_obs.mean(axis=axis, keepdims=True)
# Older versions of numpy do not have the 'keepdims' argument, so
# we have to do a little work to achieve the same result.
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = np.atleast_1d(f_obs.mean(axis=axis))
if axis is not None:
reduced_shape = list(f_obs.shape)
reduced_shape[axis] = 1
f_exp.shape = reduced_shape
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""
    Calculates a one-way chi-square test.
    The chi-square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
power_divergence
mstats.chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))
def ks_2samp(data1, data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Parameters
----------
data1, data2 : sequence of 1-D ndarrays
two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different
Returns
-------
statistic : float
KS statistic
pvalue : float
two-tailed p-value
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample K-S test, the distribution is
assumed to be continuous.
    This is the two-sided test; one-sided tests are not implemented.
The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.
If the K-S statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
>>> stats.ks_2samp(rvs1, rvs2)
(0.20833333333333337, 4.6674975515806989e-005)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs3)
(0.10333333333333333, 0.14498781825751686)
For an identical distribution, we cannot reject the null hypothesis since
the p-value is high, 41%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
data_all = np.concatenate([data1, data2])
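    # Evaluate both empirical CDFs on the pooled sample; the KS statistic is
    # the largest gap between them.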
cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
d = np.max(np.absolute(cdf1 - cdf2))
    # Note: d is the absolute, not signed, distance between the CDFs.
en = np.sqrt(n1 * n2 / float(n1 + n2))
try:
prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
    except Exception:
prob = 1.0
return Ks_2sampResult(d, prob)
def tiecorrect(rankvals):
"""
Tie correction factor for ties in the Mann-Whitney U and
Kruskal-Wallis H tests.
Parameters
----------
rankvals : array_like
A 1-D sequence of ranks. Typically this will be the array
returned by `stats.rankdata`.
Returns
-------
factor : float
Correction factor for U or H.
See Also
--------
rankdata : Assign ranks to the data
mannwhitneyu : Mann-Whitney rank test
kruskal : Kruskal-Wallis H test
References
----------
.. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill.
Examples
--------
>>> from scipy.stats import tiecorrect, rankdata
>>> tiecorrect([1, 2.5, 2.5, 4])
0.9
>>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
>>> ranks
array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5])
>>> tiecorrect(ranks)
0.9833333333333333
"""
arr = np.sort(rankvals)
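    # Boundaries of runs of equal ranks; cnt below holds each run's length t,
    # which enters the correction through sum(t**3 - t).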
idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]
cnt = np.diff(idx).astype(np.float64)
size = np.float64(arr.size)
return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size)
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue'))
def mannwhitneyu(x, y, use_continuity=True, alternative=None):
"""
Computes the Mann-Whitney rank test on samples x and y.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into
account. Default is True.
alternative : None (deprecated), 'less', 'two-sided', or 'greater'
Whether to get the p-value for the one-sided hypothesis ('less'
or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to None, which results in a p-value half the size of
the 'two-sided' p-value and a different U statistic. The
default behavior is not the same as using 'less' or 'greater':
it only exists for backward compatibility and is deprecated.
Returns
-------
statistic : float
The Mann-Whitney U statistic, equal to min(U for x, U for y) if
`alternative` is equal to None (deprecated; exists for backward
compatibility), and U for y otherwise.
pvalue : float
p-value assuming an asymptotic normal distribution. One-sided or
two-sided, depending on the choice of `alternative`.
Notes
-----
    Use only when the number of observations in each sample is > 20 and
    you have 2 independent samples of ranks. Mann-Whitney U is
    significant if the U obtained is less than or equal to the critical
    value of U.
This test corrects for ties and by default uses a continuity correction.
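    Examples
    --------
    A minimal usage sketch; the samples below are illustrative only:
    >>> from scipy import stats
    >>> u, p = stats.mannwhitneyu([1, 2, 3, 4], [3, 4, 5, 6],
    ...                           alternative='two-sided')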
"""
if alternative is None:
warnings.warn("Calling `mannwhitneyu` without specifying "
"`alternative` is deprecated.", DeprecationWarning)
x = np.asarray(x)
y = np.asarray(y)
n1 = len(x)
n2 = len(y)
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
T = tiecorrect(ranked)
if T == 0:
raise ValueError('All numbers are identical in mannwhitneyu')
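    # Normal approximation: tie-corrected standard deviation of U under H0.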
sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
meanrank = n1*n2/2.0 + 0.5 * use_continuity
if alternative is None or alternative == 'two-sided':
bigu = max(u1, u2)
elif alternative == 'less':
bigu = u1
elif alternative == 'greater':
bigu = u2
else:
raise ValueError("alternative should be None, 'less', 'greater' "
"or 'two-sided'")
z = (bigu - meanrank) / sd
if alternative is None:
# This behavior, equal to half the size of the two-sided
# p-value, is deprecated.
p = distributions.norm.sf(abs(z))
elif alternative == 'two-sided':
p = 2 * distributions.norm.sf(abs(z))
else:
p = distributions.norm.sf(z)
u = u2
# This behavior is deprecated.
if alternative is None:
u = min(u1, u2)
return MannwhitneyuResult(u, p)
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
def ranksums(x, y):
"""
Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed
pvalue : float
The two-sided p-value of the test
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
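    Examples
    --------
    A minimal usage sketch; the samples below are illustrative only:
    >>> from scipy import stats
    >>> z, p = stats.ranksums([1.2, 2.3, 3.1, 4.0], [2.4, 3.3, 4.1, 5.2])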
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2 * distributions.norm.sf(abs(z))
return RanksumsResult(z, prob)
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
def kruskal(*args, **kwargs):
"""
Compute the Kruskal-Wallis H-test for independent samples
The Kruskal-Wallis H-test tests the null hypothesis that the population
    medians of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post-hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution
See Also
--------
f_oneway : 1-way ANOVA
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.27272727272727337, pvalue=0.60150813444058948)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.030197383422318501)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
n = np.asarray(list(map(len, args)))
if 'nan_policy' in kwargs.keys():
if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'):
            raise ValueError("nan_policy must be 'propagate', "
                             "'raise' or 'omit'")
else:
nan_policy = kwargs['nan_policy']
else:
nan_policy = 'propagate'
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
if contains_nan and nan_policy == 'omit':
        # Rebinding the loop variable would discard the masked arrays, so
        # rebuild the argument list instead.
        args = [ma.masked_invalid(a) for a in args]
        return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / float(n[i])
totaln = np.sum(n)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*args):
"""
Computes the Friedman test for repeated measurements
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
the test statistic, correcting for ties
pvalue : float
the associated p-value assuming that the test statistic has a chi
squared distribution
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] http://en.wikipedia.org/wiki/Friedman_test
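    Examples
    --------
    A minimal usage sketch with three measurement sets (values are
    illustrative only):
    >>> from scipy import stats
    >>> stat, p = stats.friedmanchisquare([7, 9, 8], [5, 6, 7], [8, 8, 9])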
"""
k = len(args)
if k < 3:
raise ValueError('Less than 3 levels. Friedman test not appropriate.')
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for i in range(len(data)):
replist, repnum = find_repeats(array(data[i]))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / float(k*(k*k - 1)*n)
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Methods for combining the p-values of independent tests bearing upon the
same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'stouffer'}, optional
Name of method to use to combine p-values. The following methods are
available:
- "fisher": Fisher's method (Fisher's combined probability test),
the default.
- "stouffer": Stouffer's Z-score method.
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
statistic: float
The statistic calculated by the specified method:
- "fisher": The chi-squared statistic
- "stouffer": The Z-score
pval: float
The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [3]_ [4]_.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
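    Examples
    --------
    A minimal usage sketch; the p-values below are illustrative only:
    >>> from scipy import stats
    >>> stat, pval = stats.combine_pvalues([0.01, 0.2, 0.3], method='fisher')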
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
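        # Under H0, -2 * sum(log(p_i)) follows a chi-squared distribution
        # with 2*k degrees of freedom, where k = len(pvalues).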
Xsq = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
return (Xsq, pval)
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
Z = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(Z)
return (Z, pval)
else:
        raise ValueError(
            "Invalid method '%s'. Options are 'fisher' or 'stouffer'"
            % method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
@np.deprecate(message="stats.chisqprob is deprecated in scipy 0.17.0; "
"use stats.distributions.chi2.sf instead.")
def chisqprob(chisq, df):
"""
Probability value (1-tail) for the Chi^2 probability distribution.
Broadcasting rules apply.
Parameters
----------
chisq : array_like or float > 0
df : array_like or float, probably int >= 1
Returns
-------
chisqprob : ndarray
The area from `chisq` to infinity under the Chi^2 probability
distribution with degrees of freedom `df`.
"""
return distributions.chi2.sf(chisq, df)
@np.deprecate(message="stats.betai is deprecated in scipy 0.17.0; "
"use special.betainc instead")
def betai(a, b, x):
"""
Returns the incomplete beta function.
I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a.
The standard broadcasting rules apply to a, b, and x.
Parameters
----------
a : array_like or float > 0
b : array_like or float > 0
x : array_like or float
        x will be clipped to be no greater than 1.0.
Returns
-------
betai : ndarray
Incomplete beta function.
"""
return _betai(a, b, x)
def _betai(a, b, x):
x = np.asarray(x)
x = np.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
#####################################
# ANOVA CALCULATIONS #
#####################################
@np.deprecate(message="stats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
    """Calculation of Wilks lambda F-statistic for multivariate data, per
Maxwell & Delaney p.657.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
lmbda = linalg.det(EF) / linalg.det(ER)
if (a-1)**2 + (b-1)**2 == 5:
q = 1
else:
q = np.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
@np.deprecate(message="stats.f_value deprecated in scipy 0.17.0")
def f_value(ER, EF, dfR, dfF):
"""
Returns an F-statistic for a restricted vs. unrestricted model.
Parameters
----------
ER : float
`ER` is the sum of squared residuals for the restricted model
or null hypothesis
EF : float
`EF` is the sum of squared residuals for the unrestricted model
or alternate hypothesis
dfR : int
`dfR` is the degrees of freedom in the restricted model
dfF : int
`dfF` is the degrees of freedom in the unrestricted model
Returns
-------
F-statistic : float
"""
return (ER - EF) / float(dfR - dfF) / (EF / float(dfF))
@np.deprecate(message="stats.f_value_multivariate deprecated in scipy 0.17.0")
def f_value_multivariate(ER, EF, dfnum, dfden):
"""
Returns a multivariate F-statistic.
Parameters
----------
ER : ndarray
Error associated with the null hypothesis (the Restricted model).
From a multivariate F calculation.
EF : ndarray
Error associated with the alternate hypothesis (the Full model)
From a multivariate F calculation.
dfnum : int
        Degrees of freedom of the Restricted model.
    dfden : int
        Degrees of freedom associated with the Full model.
Returns
-------
fstat : float
The computed F-statistic.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
n_um = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
d_en = linalg.det(EF) / float(dfden)
return n_um / d_en
#####################################
# SUPPORT FUNCTIONS #
#####################################
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
def find_repeats(arr):
"""
Find repeats and repeat counts.
Parameters
----------
arr : array_like
Input array. This is cast to float64.
Returns
-------
values : ndarray
The unique values from the (flattened) input that are repeated.
counts : ndarray
Number of times the corresponding 'value' is repeated.
Notes
-----
In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
difference is that `find_repeats` only returns repeated values.
Examples
--------
>>> from scipy import stats
>>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
RepeatedResults(values=array([ 2.]), counts=array([4]))
>>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
RepeatedResults(values=array([ 4., 5.]), counts=array([2, 2]))
"""
# Note: always copies.
return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
@np.deprecate(message="scipy.stats.ss is deprecated in scipy 0.17.0")
def ss(a, axis=0):
return _sum_of_squares(a, axis)
def _sum_of_squares(a, axis=0):
"""
Squares each element of the input array, and returns the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
@np.deprecate(message="scipy.stats.square_of_sums is deprecated "
"in scipy 0.17.0")
def square_of_sums(a, axis=0):
return _square_of_sums(a, axis)
def _square_of_sums(a, axis=0):
"""
Sums elements of the input array, and returns the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See also
--------
_sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
"""
Sort an array and provide the argsort.
Parameters
----------
a : array_like
Input array.
Returns
-------
    sorted_array : ndarray
        Sorted copy of the input array.
    indices : ndarray of ints
        The indices that sort the original array (its argsort).
    """
it = np.argsort(a)
as_ = a[it]
return as_, it
def rankdata(a, method='average'):
"""
rankdata(a, method='average')
Assign ranks to data, dealing with ties appropriately.
Ranks begin at 1. The `method` argument controls how ranks are assigned
to equal values. See [1]_ for further discussion of ranking methods.
Parameters
----------
a : array_like
The array of values to be ranked. The array is first flattened.
method : str, optional
The method used to assign ranks to tied elements.
The options are 'average', 'min', 'max', 'dense' and 'ordinal'.
'average':
The average of the ranks that would have been assigned to
all the tied values is assigned to each value.
'min':
The minimum of the ranks that would have been assigned to all
the tied values is assigned to each value. (This is also
referred to as "competition" ranking.)
'max':
The maximum of the ranks that would have been assigned to all
the tied values is assigned to each value.
'dense':
Like 'min', but the rank of the next highest element is assigned
the rank immediately after those assigned to the tied elements.
'ordinal':
All values are given a distinct rank, corresponding to the order
that the values occur in `a`.
The default is 'average'.
Returns
-------
ranks : ndarray
An array of length equal to the size of `a`, containing rank
scores.
References
----------
.. [1] "Ranking", http://en.wikipedia.org/wiki/Ranking
Examples
--------
>>> from scipy.stats import rankdata
>>> rankdata([0, 2, 3, 2])
array([ 1. , 2.5, 4. , 2.5])
>>> rankdata([0, 2, 3, 2], method='min')
array([ 1, 2, 4, 2])
>>> rankdata([0, 2, 3, 2], method='max')
array([ 1, 3, 4, 3])
>>> rankdata([0, 2, 3, 2], method='dense')
array([ 1, 2, 3, 2])
>>> rankdata([0, 2, 3, 2], method='ordinal')
array([ 1, 2, 4, 3])
"""
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
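    # obs flags the first element of each run of equal values in sorted order;
    # its cumulative sum yields dense ranks, mapped back through inv.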
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
| bsd-3-clause |
lnls-fac/apsuite | apsuite/commisslib/measure_chromdisp.py | 1 | 11698 | """Main module."""
import time as _time
from math import log10, floor
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as mpl_gs
from siriuspy.devices import SOFB, RFGen, Tune
from pymodels import si
import pyaccel
from ..utils import ThreadedMeasBaseClass as _BaseClass, \
ParamsBaseClass as _ParamsBaseClass
class MeasParams(_ParamsBaseClass):
"""."""
MOM_COMPACT = 1.68e-4
def __init__(self):
"""."""
super().__init__()
self.delta_freq = 200 # [Hz]
self.meas_nrsteps = 8
self.npoints = 5
self.wait_tune = 5 # [s]
self.timeout_wait_sofb = 3 # [s]
self.sofb_nrpoints = 10
def __str__(self):
"""."""
ftmp = '{0:24s} = {1:9.3f} {2:s}\n'.format
dtmp = '{0:24s} = {1:9d} {2:s}\n'.format
stg = ftmp('delta_freq [Hz]', self.delta_freq, '')
stg += dtmp('meas_nrsteps', self.meas_nrsteps, '')
stg += ftmp('wait_tune [s]', self.wait_tune, '')
stg += ftmp(
'timeout_wait_sofb [s]', self.timeout_wait_sofb, '(get orbit)')
stg += dtmp('sofb_nrpoints', self.sofb_nrpoints, '')
return stg
class MeasDispChrom(_BaseClass):
"""."""
def __init__(self):
"""."""
super().__init__(params=MeasParams(), target=self._do_meas)
self.devices['sofb'] = SOFB(SOFB.DEVICES.SI)
self.devices['tune'] = Tune(Tune.DEVICES.SI)
self.devices['rf'] = RFGen()
def __str__(self):
"""."""
stn = 'Params\n'
stp = self.params.__str__()
stp = ' ' + stp.replace('\n', '\n ')
stn += stp + '\n'
stn += 'Connected? ' + str(self.connected) + '\n\n'
return stn
def _do_meas(self):
sofb = self.devices['sofb']
rfgen = self.devices['rf']
tune = self.devices['tune']
loop_on = False
if sofb.autocorrsts:
loop_on = True
            print('SOFB feedback is enabled, disabling it...')
sofb.cmd_turn_off_autocorr()
delta_freq = self.params.delta_freq
npoints = self.params.meas_nrsteps
sofb.nr_points = self.params.sofb_nrpoints
freq0 = rfgen.frequency
tunex0 = tune.tunex
tuney0 = tune.tuney
orbx0 = sofb.orbx
orby0 = sofb.orby
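        # Scan the RF frequency symmetrically around the current setpoint.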
span = np.linspace(freq0-delta_freq/2, freq0+delta_freq/2, npoints)
freq = []
tunex, tuney = [], []
orbx, orby = [], []
for frq in span:
if self._stopevt.is_set():
print(' exiting...')
break
rfgen.frequency = frq
sofb.cmd_reset()
_time.sleep(self.params.wait_tune)
sofb.wait_buffer(self.params.timeout_wait_sofb)
freq.append(rfgen.frequency)
orbx.append(sofb.orbx)
orby.append(sofb.orby)
tunex.append(tune.tunex)
tuney.append(tune.tuney)
print('delta frequency: {} Hz'.format((
rfgen.frequency-freq0)))
print('dtune x: {}'.format((tunex[-1] - tunex0)))
print('dtune y: {}'.format((tuney[-1] - tuney0)))
print('')
print('Restoring RF frequency...')
rfgen.frequency = freq0
self.data['freq'] = np.array(freq)
self.data['tunex'] = np.array(tunex)
self.data['tuney'] = np.array(tuney)
self.data['orbx'] = np.array(orbx)
self.data['orby'] = np.array(orby)
self.data['freq0'] = freq0
self.data['tunex0'] = tunex0
self.data['tuney0'] = tuney0
self.data['orbx0'] = np.array(orbx0)
self.data['orby0'] = np.array(orby0)
if loop_on:
            print('SOFB feedback was enabled, restoring original state...')
sofb.cmd_turn_on_autocorr()
print('Finished!')
def process_data(self, fitorder=1, discardpoints=None):
"""."""
data = self.data
usepts = set(range(data['tunex'].shape[0]))
if discardpoints is not None:
usepts = set(usepts) - set(discardpoints)
usepts = sorted(usepts)
freq0 = data['freq0']
den = -(data['freq'] - freq0)/freq0/self.params.MOM_COMPACT
den = den[usepts]
tunex = data['tunex'][usepts]
tuney = data['tuney'][usepts]
orbx = data['orbx'][usepts, :]
orby = data['orby'][usepts, :]
if tunex.size > fitorder + 1:
chromx, chromxcov = np.polyfit(den, tunex, deg=fitorder, cov=True)
chromy, chromycov = np.polyfit(den, tuney, deg=fitorder, cov=True)
dispx, dispxcov = np.polyfit(den, orbx, deg=fitorder, cov=True)
dispy, dispycov = np.polyfit(den, orby, deg=fitorder, cov=True)
else:
chromx = np.polyfit(den, tunex, deg=fitorder, cov=False)
chromy = np.polyfit(den, tuney, deg=fitorder, cov=False)
dispx = np.polyfit(den, orbx, deg=fitorder, cov=False)
dispy = np.polyfit(den, orby, deg=fitorder, cov=False)
chromxcov = chromycov = np.zeros(
(fitorder+1, fitorder+1), dtype=float)
dispxcov = dispycov = np.zeros(
(fitorder+1, fitorder+1, orbx.shape[1]), dtype=float)
um2m = 1e-6
self.analysis['delta'] = den
self.analysis['orbx'] = orbx
self.analysis['orby'] = orby
self.analysis['dispx'] = dispx * um2m
self.analysis['dispy'] = dispy * um2m
self.analysis['dispx_err'] = np.sqrt(np.diagonal(dispxcov)) * um2m
self.analysis['dispy_err'] = np.sqrt(np.diagonal(dispycov)) * um2m
self.analysis['tunex'] = tunex
self.analysis['tuney'] = tuney
self.analysis['chromx'] = chromx
self.analysis['chromy'] = chromy
self.analysis['chromx_err'] = np.sqrt(np.diagonal(chromxcov))
self.analysis['chromy_err'] = np.sqrt(np.diagonal(chromycov))
def make_figure_chrom(self, analysis=None, title='', fname=''):
"""."""
fig = plt.figure(figsize=(10, 5))
grid = mpl_gs.GridSpec(1, 1)
grid.update(
left=0.12, right=0.95, bottom=0.15, top=0.9,
hspace=0.5, wspace=0.35)
if title:
fig.suptitle(title)
if analysis is None:
analysis = self.analysis
        den = analysis['delta']
        tunex = analysis['tunex']
        tuney = analysis['tuney']
        chromx = analysis['chromx']
        chromx_err = analysis['chromx_err']
        chromy = analysis['chromy']
        chromy_err = analysis['chromy_err']
dtunex = tunex - chromx[-1]
dtuney = tuney - chromy[-1]
dtunex_fit = np.polyval(chromx, den) - chromx[-1]
dtuney_fit = np.polyval(chromy, den) - chromy[-1]
axx = plt.subplot(grid[0, 0])
axx.plot(den*100, dtunex*1000, '.b', label='horizontal')
axx.plot(den*100, dtunex_fit*1000, '-b')
axx.plot(den*100, dtuney*1000, '.r', label='vertical')
axx.plot(den*100, dtuney_fit*1000, '-r')
axx.set_xlabel(r'$\delta$ [%]')
axx.set_ylabel(r'$\Delta \nu \times 1000$')
chromx = np.flip(chromx)
chromx_err = np.flip(chromx_err)
chromy = np.flip(chromy)
chromy_err = np.flip(chromy_err)
stx = MeasDispChrom.polynomial_to_latex(chromx, chromx_err)
sty = MeasDispChrom.polynomial_to_latex(chromy, chromy_err)
stg = r'$\Delta\nu_x = $' + stx + '\n'
stg += r'$\Delta\nu_y = $' + sty
axx.text(
0.4, 0.05, stg, horizontalalignment='left',
verticalalignment='bottom', transform=axx.transAxes,
bbox=dict(edgecolor='k', facecolor='w', alpha=1.0))
axx.legend()
axx.grid(True)
if fname:
fig.savefig(fname+'.svg')
plt.close()
else:
fig.show()
def make_figure_disp(self, analysis=None, disporder=1, title='', fname=''):
"""."""
fig = plt.figure(figsize=(10, 5))
grid = mpl_gs.GridSpec(1, 1)
grid.update(
left=0.12, right=0.95, bottom=0.15, top=0.9,
hspace=0.5, wspace=0.35)
if title:
fig.suptitle(title)
if analysis is None:
analysis = self.analysis
simod = si.create_accelerator()
fam = si.get_family_data(simod)
spos = pyaccel.lattice.find_spos(simod, indices='open')
bpmidx = np.array(fam['BPM']['index']).ravel()
sposbpm = spos[bpmidx]
fitorder_anlys = analysis['dispx'].shape[0] - 1
if disporder > fitorder_anlys:
            raise Exception(
                'It does not make sense to plot a fit order higher than '
                'the analysis fit order.')
fitidx = fitorder_anlys - disporder
dispx = analysis['dispx'][fitidx, :]
dispy = analysis['dispy'][fitidx, :]
dispx_err = analysis['dispx_err'][:, fitidx]
dispy_err = analysis['dispy_err'][:, fitidx]
m2cm = 100
axx = plt.subplot(grid[0, 0])
axx.errorbar(
sposbpm, dispx*m2cm, dispx_err*m2cm, None, '.-b',
label='horizontal')
axx.errorbar(
sposbpm, dispy*m2cm, dispy_err*m2cm, None, '.-r', label='vertical')
axx.set_xlabel('s [m]')
ylabel = r'$\eta_{:d}$ [cm]'.format(disporder)
axx.set_ylabel(ylabel)
axx.legend()
axx.grid(True)
if fname:
fig.savefig(fname+'.svg')
plt.close()
else:
fig.show()
# Adapted from:
# https://perso.crans.org/besson/publis/notebooks/
# Demonstration%20of%20numpy.polynomial.
# Polynomial%20and%20nice%20display%20with%20LaTeX%20and%20MathJax%20
# (python3).html
@staticmethod
def polynomial_to_latex(poly, error):
""" Small function to print nicely the polynomial p as we write it in
maths, in LaTeX code."""
poly = np.poly1d(poly)
        coefs = poly.coef  # list of coefficients, sorted by increasing degree
res = '' # The resulting string
for idx, coef_idx in enumerate(coefs):
err = error[idx]
sig_fig = int(floor(log10(abs(err))))
err = round(err, -sig_fig)
coef_idx = round(coef_idx, -sig_fig)
if int(coef_idx) == coef_idx: # Remove the trailing .0
coef_idx = int(coef_idx)
if idx == 0: # First coefficient, no need for X
continue
elif idx == 1: # Second coefficient, only X and not X**i
                if coef_idx == 1:  # coef_idx = 1 does not need to be displayed
                    res += r"\delta + "
                elif coef_idx != 0:
                    res += r"({a} \pm {b}) \;\delta + ".format(
                        a="{%g}" % coef_idx, b="{%g}" % err)
else:
                if coef_idx == 1:
                    # Special care is needed to put the exponent in {..}
                    # in LaTeX.
                    res += r"\delta^{i} + ".format(i="{%d}" % idx)
                elif coef_idx != 0:
                    res += r"({a} \pm {b}) \;\delta^{i} + ".format(
                        a="{%g}" % coef_idx, b="{%g}" % err, i="{%d}" % idx)
return "$" + res[:-3] + "$" if res else ""
| mit |
lpfann/fri | fri/model/regression.py | 1 | 3623 | import cvxpy as cvx
import numpy as np
from sklearn.utils import check_X_y
from sklearn.metrics import r2_score
from .base_cvxproblem import Relevance_CVXProblem
from .base_initmodel import InitModel
from .base_type import ProblemType
class Regression(ProblemType):
@classmethod
def parameters(cls):
return ["C", "epsilon"]
@property
def get_initmodel_template(cls):
return Regression_SVR
@property
def get_cvxproblem_template(cls):
return Regression_Relevance_Bound
def relax_factors(cls):
return ["loss_slack", "w_l1_slack"]
def preprocessing(self, data, **kwargs):
X, y = data
# Check that X and y have correct shape
X, y = check_X_y(X, y)
return X, y
class Regression_SVR(InitModel):
HYPERPARAMETER = ["C", "epsilon"]
def __init__(self, C=1, epsilon=0.1):
super().__init__()
self.epsilon = epsilon
self.C = C
def fit(self, X, y, **kwargs):
(n, d) = X.shape
C = self.get_params()["C"]
epsilon = self.get_params()["epsilon"]
w = cvx.Variable(shape=(d), name="w")
slack = cvx.Variable(shape=(n), name="slack")
b = cvx.Variable(name="bias")
objective = cvx.Minimize(cvx.norm(w, 1) + C * cvx.sum(slack))
constraints = [cvx.abs(y - (X @ w + b)) <= epsilon + slack, slack >= 0]
# Solve problem.
problem = cvx.Problem(objective, constraints)
problem.solve(**self.SOLVER_PARAMS)
w = w.value
b = b.value
slack = np.asarray(slack.value).flatten()
self.model_state = {"w": w, "b": b, "slack": slack}
loss = np.sum(slack)
w_l1 = np.linalg.norm(w, ord=1)
self.constraints = {"loss": loss, "w_l1": w_l1}
return self
def predict(self, X):
w = self.model_state["w"]
b = self.model_state["b"]
y = np.dot(X, w) + b
return y
def score(self, X, y, **kwargs):
prediction = self.predict(X)
        # Use the coefficient of determination (R^2) as the regression score.
score = r2_score(y, prediction)
return score
class Regression_Relevance_Bound(Relevance_CVXProblem):
def init_objective_UB(self, sign=None, **kwargs):
self.add_constraint(
self.feature_relevance <= sign * self.w[self.current_feature]
)
self._objective = cvx.Maximize(self.feature_relevance)
def init_objective_LB(self, **kwargs):
self.add_constraint(
cvx.abs(self.w[self.current_feature]) <= self.feature_relevance
)
self._objective = cvx.Minimize(self.feature_relevance)
def _init_constraints(self, parameters, init_model_constraints):
# Upper constraints from initial model
l1_w = init_model_constraints["w_l1"]
init_loss = init_model_constraints["loss"]
C = parameters["C"]
epsilon = parameters["epsilon"]
# New Variables
self.w = cvx.Variable(shape=(self.d), name="w")
self.b = cvx.Variable(name="b")
self.slack = cvx.Variable(shape=(self.n), nonneg=True, name="slack")
# New Constraints
distance_from_plane = cvx.abs(self.y - (self.X @ self.w + self.b))
self.loss = cvx.sum(self.slack)
self.weight_norm = cvx.norm(self.w, 1)
self.add_constraint(distance_from_plane <= epsilon + self.slack)
self.add_constraint(self.weight_norm <= l1_w)
self.add_constraint(C * self.loss <= C * init_loss)
self.feature_relevance = cvx.Variable(nonneg=True, name="Feature Relevance")
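# --- Minimal usage sketch (illustrative, synthetic data; assumes the cvxpy
# solver defaults configured on the InitModel base class) -------------------
#     >>> import numpy as np
#     >>> rng = np.random.RandomState(0)
#     >>> X = rng.randn(50, 5)
#     >>> y = X[:, 0] - 2 * X[:, 1] + 0.01 * rng.randn(50)
#     >>> model = Regression_SVR(C=1, epsilon=0.1).fit(X, y)
#     >>> model.score(X, y)  # R^2, close to 1 for this near-noiseless fit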
| mit |
mmottahedi/neuralnilm_prototype | scripts/e165.py | 2 | 5124 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd LSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using LSTM not BLSTM
e151
* Max pooling
"""
def exp_a(name):
    # Based on e151d, but training for much longer (note: skip_probability
    # is set to 0.0 below, not 0.7 as in the original run).
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer'
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[200, 100, 200, 2500, 2400],
on_power_thresholds=[5, 5, 5, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.0,
n_seq_per_batch=25,
include_diff=True
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=250,
loss_function=mse,
updates=partial(nesterov_momentum, learning_rate=.1, clip_range=(-1, 1)),
layers_config=[
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(25),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(1),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
print("EXCEPTION:", exception)
import ipdb; ipdb.set_trace()
if __name__ == "__main__":
main()
| mit |
Sentient07/scikit-learn | examples/cluster/plot_color_quantization.py | 61 | 3444 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (which needs
# to be in the range [0-1])
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
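# A vectorized equivalent of recreate_image (a sketch; same result via fancy
# indexing, without the explicit Python double loop):
def recreate_image_fast(codebook, labels, w, h):
    """Recreate the (compressed) image with a single indexing operation."""
    return codebook[labels].reshape(w, h, -1)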
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
admcrae/tensorflow | tensorflow/examples/learn/iris_custom_decay_dnn.py | 30 | 2039 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with exponential decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
def optimizer_exp_decay():
global_step = tf.contrib.framework.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
return tf.train.AdagradOptimizer(learning_rate=learning_rate)
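# With the values above, the decayed rate follows, schematically,
#   lr(step) = 0.1 * 0.001 ** (step / 100)
# i.e. the learning rate shrinks by a factor of 1000 every 100 global steps.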
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
optimizer=optimizer_exp_decay)
classifier.fit(x_train, y_train, steps=800)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
anirudhjayaraman/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to stepwise
regression, and thus brings no advantage over a univariate F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under the precision-recall
curve (AUC).
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
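# Quick sanity check (a sketch, not part of the example's output): for an
# i.i.d. Gaussian design the mutual incoherence should be small, e.g.
#   rng = np.random.RandomState(0)
#   mutual_incoherence(rng.randn(25, 3), rng.randn(25, 10))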
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a paths going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
MulletLab/leafangle_supplement | v_sorghum/scripts/plot_par_fit_vsorg_grain_k2.py | 2 | 6849 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# python plot_par_fit_vsorg_grain_k2.py ../caribu_out/June_15_2013/smallangle_whorl/1200_20_eabs ../caribu_out/June_15_2013/smallangle_whorl/1200_20_plant_positions ../caribu_out/June_15_2013/largeangle_whorl/1200_20_eabs ../caribu_out/June_15_2013/largeangle_whorl/1200_20_plant_positions
from pylab import *
import matplotlib.pyplot as plt
from operator import itemgetter
import scipy.stats as sp
from collections import defaultdict
import numpy as np
import sys
from scipy.optimize import curve_fit
from math import *
import collections
import itertools
def consume(iterator, n):
    "Advance the iterator n steps ahead, discarding the consumed items."
    collections.deque(itertools.islice(iterator, n), maxlen=0)
if len(sys.argv) <= 4:
print("plotEABs.py 1_eabs.tsv 1_plant_position 2_eabs.tsv 2_plant_position")
sys.exit()
if sys.argv[1] == "--help" or sys.argv[1] == "-h":
print("plotEABs.py 1_eabs.tsv 1_plant_position 2_eabs.tsv 2_plant_position")
sys.exit()
try:
li_1_eabs = [line.strip() for line in open(sys.argv[1])]
li_1_eabs = [element.split('\t') for element in li_1_eabs]
li_1_pp = [line.strip() for line in open(sys.argv[2])]
li_1_pp = [element.split(' ') for element in li_1_pp]
li_2_eabs = [line.strip() for line in open(sys.argv[3])]
li_2_eabs = [element.split('\t') for element in li_2_eabs]
li_2_pp = [line.strip() for line in open(sys.argv[4])]
li_2_pp = [element.split(' ') for element in li_2_pp]
except IOError:
print("Cannot open target file. Please check your input:")
print("\t$ python plotEABs.py 1_eabs.tsv 1_planter_position 2_eabs.tsv 2_planter_position")
sys.exit()
###### convert KJ/m^2s^1 to umol/m^2s^1
###### 2000umol/m2/s = 9800FC = 1060W/m2 = 1060J/m2/s = 106000LUX.
###### 2000/1060 umol/J 1000/1 J/KJ
def KJ_to_umol(KJ):
    return (2000.0/1060.0)*1000.0*KJ  # floats avoid Python 2 integer division
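# Consistency check (a sketch): KJ_to_umol(1.06) gives ~2000.0, matching the
# 2000 umol/m2/s = 1060 W/m2 equivalence quoted above.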
li_1X = []
li_1Y = []
li_2X = []
li_2Y = []
li_1X_percent = []
li_1Y_percent = []
li_2X_percent = []
li_2Y_percent = []
LILI_1Y =[]
LILI_2Y =[]
LILI_1Y_percent = []
LILI_2Y_percent = []
LI_plant_row_pos = ["1.0466507177", "1.14633173844", "1.24601275917", "1.3456937799", "1.44537480064", "1.54505582137", "1.64473684211", "1.74441786284", "1.84409888357", "1.94377990431"]
LI_plant_col_pos = ["1.14", "1.9"]
row = int(0)
col = int(1)
for assayed_col in range(len(LI_plant_col_pos)):
for assayed_row in range(len(LI_plant_row_pos)):
for plant in range(len(li_1_pp)):
if (li_1_pp[plant][col] == LI_plant_col_pos[assayed_col]) and (li_1_pp[plant][row] == LI_plant_row_pos[assayed_row]):
for eabs in range(len(li_1_eabs[plant])):
if eabs%2==0 and (eabs !=len(li_1_eabs[plant])-1):
li_1Y.append(float(li_1_eabs[plant][eabs])+float(li_1_eabs[plant][eabs+1]))
for i in range(len(li_1Y)):
li_1Y_percent.append((li_1Y[i]/max(li_1Y))*100.0)
break
LILI_1Y.append(li_1Y)
LILI_1Y_percent.append(li_1Y_percent)
li_1Y = []
li_1Y_percent = []
for plant in range(len(li_2_pp)):
if (li_2_pp[plant][col] == LI_plant_col_pos[assayed_col]) and (li_2_pp[plant][row] == LI_plant_row_pos[assayed_row]):
for eabs in range(len(li_2_eabs[plant])):
if eabs%2==0 and (eabs !=len(li_2_eabs[plant])-1):
li_2Y.append(float(li_2_eabs[plant][eabs])+float(li_2_eabs[plant][eabs+1]))
for i in range(len(li_2Y)):
li_2Y_percent.append((li_2Y[i]/max(li_2Y))*100.0)
break
LILI_2Y.append(li_2Y)
LILI_2Y_percent.append(li_2Y_percent)
li_2Y = []
li_2Y_percent = []
for i in range(len(LILI_1Y[0])):
li_1X.append(float(i+1))
li_1X_percent.append(((float(i)+1.0)/float(len(LILI_1Y[0])))*100)
for phytomer in range(len(LILI_1Y[0])):
phytomer_energy = 0.0
for plant in range(len(LILI_1Y)):
phytomer_energy = phytomer_energy + LILI_1Y[plant][phytomer]
li_1Y.append(phytomer_energy)
for i in range(len(LILI_2Y[0])):
li_2X.append(float(i+1))
li_2X_percent.append(((float(i)+1.0)/float(len(LILI_2Y[0])))*100)
for phytomer in range(len(LILI_2Y[0])):
phytomer_energy = 0.0
for plant in range(len(LILI_2Y)):
phytomer_energy = phytomer_energy + LILI_2Y[plant][phytomer]
li_2Y.append(phytomer_energy)
light_available = sum(li_1Y)
li_1Y = list(reversed(li_1Y))
li_1X = list(reversed(li_1X))
li_1X_percent = list(reversed(li_1X_percent))
li_1Y_available = []
for phytomer in range(len(li_1Y)):
light_available = light_available - sum(li_1Y[phytomer])
li_1Y_available.append(light_available)
light_available = sum(li_2Y)
li_2Y = list(reversed(li_2Y))
li_2X = list(reversed(li_2X))
li_2X_percent = list(reversed(li_2X_percent))
li_2Y_available = []
for phytomer in range(len(li_2Y)):
light_available = light_available - sum(li_2Y[phytomer])
li_2Y_available.append(light_available)
# convert kJ to umol
li_1Y_available = KJ_to_umol(np.array(li_1Y_available))
li_2Y_available = KJ_to_umol(np.array(li_2Y_available))
# plot colors
small_color = "#4169E1"
large_color = "#FF8C00"
###### PAR(depth) = PAR(top)*exp(-k*(height - depth)) (Beer-Lambert law)
def light_func((ril_depth, ril_height),k,PARtop):
return PARtop*(np.exp(-k*(ril_height-ril_depth)))
li_1X_percent_all = 100*(np.array(li_1X_percent)/(li_1X_percent[2]))
li_2X_percent_all = 100*(np.array(li_2X_percent)/(li_2X_percent[2]))
# whorl phytomers are set to the same depth:
li_1X_percent_all[0] = 100
li_2X_percent_all[0] = 100
li_1X_percent_all[1] = 100
li_2X_percent_all[1] = 100
# fit phytomers for k2
li_1X_percent = li_1X_percent_all[2::]
li_2X_percent = li_2X_percent_all[2::]
li_1Y_available_all = li_1Y_available
li_2Y_available_all = li_2Y_available
li_1Y_available = li_1Y_available[2::]
li_2Y_available = li_2Y_available[2::]
##### Fitting data to light absorption curve (Beer-Lambert's law)
pop1,popc = curve_fit(light_func, ((li_1X_percent), np.array(max(li_1X_percent))) , (li_1Y_available))
k_1, partop_1 = pop1
pop2,popc = curve_fit(light_func, ((li_2X_percent), np.array(max(li_2X_percent))) , (li_2Y_available))
k_2, partop_2 = pop2
##### Plot % depth and PAR available with curve fit
fig = plt.figure(figsize=(9*(2.0/3.0),5*(2.0/3.0)))
plot(np.array(li_1Y_available_all), li_1X_percent_all, '--o', color=small_color)
plot(np.array(li_2Y_available_all), li_2X_percent_all, '--o', color=large_color)
##### plot the curve_fit
plot(light_func((np.linspace(0.0, max(li_2X_percent)*2, 200), np.array(max(li_2X_percent))), k_2, partop_2), np.linspace(0., max(li_2X_percent)*2, 200), color=large_color,label=r'$k_{2}$ = %f' %(k_2))
plot(light_func((np.linspace(0.0, max(li_1X_percent)*2, 200), np.array(max(li_1X_percent))), k_1, partop_1), np.linspace(0., max(li_1X_percent)*2, 200), color=small_color,label=r'$k_{2}$ = %f' %(k_1))
plt.legend(numpoints=1, bbox_to_anchor=(1, 0), loc=4, borderaxespad=0.)
plt.xlabel('PAR available ' r'($\mu$mol $m^{-2} s^{-1}$)')
plt.ylabel('% depth')
plt.xlim(-15.0, 2200)
plt.ylim(-1.0, 104.0)
plt.savefig("vsorg_grain_k2.png", dpi=300, format="png")
| gpl-2.0 |
JPGlaser/Tycho | src/tycho/create.py | 1 | 19021 | # Python Classes/Functions used to Creating Tycho's Elements
# ------------------------------------- #
# Python Package Importing #
# ------------------------------------- #
# Importing Necessary System Packages
import math
import numpy as np
import matplotlib as plt
import numpy.random as rp
import random
# Import the Amuse Base Packages
from amuse import datamodel
from amuse.units import nbody_system
from amuse.units import units
from amuse.units import constants
from amuse.datamodel import particle_attributes
from amuse.io import *
from amuse.lab import *
# Import the Amuse Stellar Packages
from amuse.ic.kingmodel import new_king_model
from amuse.ic.kroupa import new_kroupa_mass_distribution
from amuse.ext.orbital_elements import new_binary_from_orbital_elements
from numpy.random import MT19937
from numpy.random import RandomState, SeedSequence
from tycho import util
# ------------------------------------- #
# Defining Functions #
# ------------------------------------- #
def king_cluster_v2(num_stars, **kwargs):
''' Creates an open cluster according to the King Model & Kroupa IMF
num_stars: The total number of stellar systems.
w0: The King density parameter.
vradius: The virial radius of the cluster.
seed: The random seed used for cluster generation.
do_binaries: Turn on/off binary creation.
binary_recursions: The number of times a star is tested to be a binary.
split_binaries: Turn on/off splitting Binary CoM into individual Companions.
'''
# Check Keyword Arguments
w0 = kwargs.get("w0", 2.5)
virial_radius = kwargs.get("vradius", 2 | units.parsec)
rand_seed = kwargs.get("seed", 7)
do_binaries = kwargs.get("do_binaries", True)
binary_recursions = kwargs.get("binary_recursions", 1)
split_binaries = kwargs.get("split_binaries", True)
# Check if rand_seed is a integer or not. Convert it if it isn't.
if not type(rand_seed) == type(1):
rand_seed = util.new_seed_from_string(rand_seed)
# Apply the Seed for the Cluster
rs = RandomState(MT19937(SeedSequence(rand_seed)))
np.random.seed(rand_seed)
random.seed(rand_seed)
min_stellar_mass = 100 | units.MJupiter
max_stellar_mass = 10 | units.MSun
# Creates a List of Primary Masses (in SI Units) Drawn from the Kroupa IMF
Masses_SI = util.new_truncated_kroupa(num_stars)
# If Primordial Binaries are Desired, Start the Conversion Process
if do_binaries:
# Find Sutable CoM Objects to Turn into Binaries, Update the System
# Mass, and record the CoM's Index to Remove it Later.
Masses_SI, ids_to_become_binaries = find_possible_binaries_v2(Masses_SI,
binary_recursions = binary_recursions)
# Creates the SI-to-NBody Converter
converter = nbody_system.nbody_to_si(Masses_SI.sum(), virial_radius)
# Creates a AMUS Particle Set Consisting of Positions (King) and Masses (Kroupa)
stars_SI = new_king_model(num_stars, w0, convert_nbody=converter)
stars_SI.mass = Masses_SI
# Assigning Type of System ('star' or 'primordial binary')
stars_SI.type = "star"
if do_binaries:
for com_index in ids_to_become_binaries:
stars_SI[com_index].type = "primordial binary"
# Shifts Cluster's CoM to the Origin Before Scaling to Virial Equilibrium
stars_SI.move_to_center()
if num_stars == 1:
pass
else:
stars_SI.scale_to_standard(convert_nbody=converter)
# Assigning SOI Estimate for Interaction Radius
if num_stars == 1:
stars_SI.radius = 2000*stars_SI.mass/(1.0 | units.MSun) | units.AU
else:
# Temporary Solution
stars_SI.radius = 2000*stars_SI.mass/(1.0 | units.MSun) | units.AU
# Need to think of a better way to calculate the SOI
# stars_SI.radius = 100*util.calc_SOI(stars_SI.mass, np.var(stars_SI.velocity), G=units.constants.G)
# If Requested, Split Binary Systems into Seperate Particles
if do_binaries:
# Define the Binary Set
binaries = Particles()
singles_in_binaries = Particles()
com_to_remove = Particles()
# Create a Kepler Worker to Ensure the Binaries are Approaching
BinaryConverter = nbody_system.nbody_to_si(2*np.mean(stars_SI.mass),
2*np.mean(stars_SI.radius))
kep = Kepler(unit_converter = BinaryConverter, redirection = 'none')
# Split the Binary into its Companions & Store in Seperate Sets
for com_index in ids_to_become_binaries:
stars_SI[com_index].id = com_index
binary_particle, singles_in_binary = binary_system_v2(stars_SI[com_index], stars_SI, kepler_worker=kep)
binaries.add_particle(binary_particle)
singles_in_binaries.add_particle(singles_in_binary)
com_to_remove.add_particle(stars_SI[com_index])
# If Desired, Remove the CoM and Replace it with the Single Companions.
# Note: Default is to do this until we get multiples.py and encounters.py
# to match functionality exactly plus Kira.py features.
if split_binaries:
stars_SI.remove_particles(com_to_remove)
stars_SI.add_particles(singles_in_binaries)
# Set Particle Ids for Easy Referencing
stars_SI.id = np.arange(len(stars_SI)) + 1
# Final Radius Setting (Ensuring that the Interaction Distance is not Small)
min_stellar_radius = 1000 | units.AU
for star in stars_SI:
if star.radius < min_stellar_radius:
star.radius = min_stellar_radius
# Return the Desired Particle Sets and Required Converter
if do_binaries:
kep.stop()
return stars_SI, converter, binaries, singles_in_binaries
else:
return stars_SI, converter
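# --- Minimal usage sketch (illustrative; assumes a working AMUSE install) --
#     stars, conv, binaries, singles = king_cluster_v2(
#         100, w0=2.5, vradius=2 | units.parsec, seed="cluster_A",
#         do_binaries=True)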
def find_possible_binaries_v2(com_mass_array, **kwargs):
binary_recursions = kwargs.get("binary_recursions", 1)
min_stellar_mass = kwargs.get("min_mass", 100 | units.MJupiter)
max_stellar_mass = kwargs.get("max_mass", 10 | units.MSun)
ids_to_become_binaries = []
recursion_counter = 0
while recursion_counter < binary_recursions:
recursion_counter += 1
current_com_id = 0
for com_mass in com_mass_array:
assigned_probability = rp.uniform(0, 1)
#print(assigned_probability)
            if current_com_id not in ids_to_become_binaries:
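                # Assumed log-linear multiplicity law: the binary fraction
                # fb grows with primary mass (fb ~ 0.5 at 1 MSun).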
fb = 0.2 * np.log10(com_mass.value_in(units.MSun)) + 0.5
if assigned_probability <= fb:
# If the Assigned Probability is LTE the Binary Likihood ...
# Add the Index to the Array for Later CoM Removal
ids_to_become_binaries.append(current_com_id)
# Draw a Distrubution of Kroupa Masses
possible_extra_mass = util.new_truncated_kroupa(100)
# Randomly Select one of the Above Masses
selected_index = int(np.floor(100*rp.uniform(0, 1)))
selected_extra_mass = possible_extra_mass[selected_index]
#print(selected_extra_mass)
# Add the Selected Mass to the Current CoM Mass
com_mass_array[current_com_id] += selected_extra_mass
current_com_id += 1
return com_mass_array, ids_to_become_binaries
def binary_system_v2(star_to_become_binary, set_of_stars, **kwargs):
# Check Keyword Arguments
doFlatEcc = kwargs.get("FlatEcc",True) # Apply Uniform Eccentricity Distribution
doBasic = kwargs.get("Basic", False) # Apply a Basic Binary Distribution
doFlatQ = kwargs.get("FlatQ",True) # Apply a Uniform Mass-Ratio Distribution
doRag_P = kwargs.get("RagP",True) # Apply Raghavan et al. (2010) Period Distribution
doSana_P = kwargs.get("SanaP", False) # Apply Sana et al. (2012) Period Distribution
Pcirc = kwargs.get("Pcirc", 6 | units.day ) # Circularization Period
Pmin = kwargs.get("Pmin", 10.**-1. | units.day ) # Min Orbital Period Allowed
Pmax = kwargs.get("Pmax", 10.**7. | units.day ) # Max Orbital Period Allowed
kepler_worker = kwargs.get("kepler_worker", None)
# Define Original Star's Information
rCM = star_to_become_binary.position
vCM = star_to_become_binary.velocity
# Define Initial Binary Particle Set
singles_in_binary = Particles(2)
star1 = singles_in_binary[0]
star2 = singles_in_binary[1]
star1.type = 'star'
star2.type = 'star'
star1.mass = 0. | units.MSun
star2.mass = 0. | units.MSun
# If Desired, Apply a Basic Binary Distribution
if (doBasic):
semimajor_axis = 500. | units.AU
e = 0.
star1.mass = 0.5*star_to_become_binary.mass
star2.mass = 0.5*star_to_become_binary.mass
# If Desired, Apply the Uniform Mass-Ratio Distribution (Goodwin, 2012)
if (doFlatQ):
min_stellar_mass = 100. | units.MJupiter # Greater Mass Than "AB Doradus C"
while star2.mass <= min_stellar_mass:
q = np.random.random_sample()
star1.mass = star_to_become_binary.mass / (1. + q)
star2.mass = q * star1.mass
# If Desired, Apply Uniform Eccentricity Distribution
if (doFlatEcc):
e = rp.uniform(0.0,1.0)
# Set the Maximum Period Allowed by Perturbers
Pmax_by_perturber = set_max_period_from_perturber(star_to_become_binary, set_of_stars, eccentricity= e)
if Pmax_by_perturber < Pmax:
Pmax = Pmax_by_perturber
# If Desired, Apply Raghavan et al. (2010) Period Distribution
if (doRag_P):
sigma = 2.28
mu = 5.03
period = 2.*Pmax
while (period > Pmax or period < Pmin):
#logP = sigma * np.random.randn() + mu
logP = np.random.normal(loc=mu, scale=sigma)
period = 10.**logP | units.day
semimajor_axis = ((period**2.)/(4.*np.pi**2.)*constants.G*(star1.mass+star2.mass))**(1./3.)
# If Desired & Applicable, Apply Sana et al. (2012) Period Distribution
if (doSana_P and star1.mass > 15 | units.MSun):
maxLogP = np.log10(Pmax.value_in(units.day))
minLogP = np.log10(Pmin.value_in(units.day))
pMod = -0.55 + 1.
x1 = np.random.random()
logP = ((maxLogP**pMod-minLogP**pMod)*x1 + minLogP**pMod)**(1./pMod)
period = 10.**logP | units.day
semimajor_axis = ((period**2.)/(4.*np.pi**2.)*constants.G*(star1.mass+star2.mass))**(1./3.)
# Always circularize low period Binaries
if (period < Pcirc):
e = 0.0
# Get the Companion's Positions from Kepler Relative to the Origin
newBinary = new_binary_from_orbital_elements(star1.mass, star2.mass, semimajor_axis,
eccentricity = e, G = constants.G)
# Rotate the Binary System & Move to the CoM's Position
util.preform_EulerRotation(newBinary)
star1.position = rCM + newBinary[0].position
star1.velocity = vCM + newBinary[0].velocity
star2.position = rCM + newBinary[1].position
star2.velocity = vCM + newBinary[1].velocity
# Apply a Fitting Dynamical Radius
singles_in_binary.radius = semimajor_axis*(1+e)
# Ensure Binary Components are Approaching Each Other
if kepler_worker == None:
BinaryConverter = nbody_system.nbody_to_si(2*np.mean(singles_in_binary.mass),
2*np.mean(singles_in_binary.radius))
kep = Kepler(unit_converter = BinaryConverter, redirection = 'none')
else:
kep = kepler_worker
star1, star2 = util.ensure_approaching_binary(star1, star2, kepler_worker=kep)
# Create the Binary System Particle (For Stellar Evolution Code)
star_to_become_binary.radius = 5*semimajor_axis
binary_particle = star_to_become_binary.copy()
binary_particle.child1 = star1
binary_particle.child2 = star2
binary_particle.semimajor_axis = semimajor_axis
binary_particle.eccentricity = e
binary_particle.id = star_to_become_binary.id
# Return the Binary System Particle & the Particle Set of Individual Companions
return binary_particle, singles_in_binary
def planetary_systems_v2(stars, num_systems, **kwargs):
''' Creates several mock planetary systems around random stars in the provided set.
stars: The AMUSE Particle Set containing stellar information.
num_systems: The number of planetary systems requested.
filename_planets: Filename for the Initial Planetary System HDF5 Archive.
Earth, Jupiter, Neptune: Booleans asking if they should be included.
'''
makeEarth = kwargs.get("Earth", False)
makeJupiter = kwargs.get("Jupiter", True)
makeNeptune = kwargs.get("Neptune", False)
makeTestPlanet = kwargs.get("TestP", False)
kepler_worker = kwargs.get("kepler_worker", None)
if kepler_worker == None:
SmallScaleConverter = nbody_system.nbody_to_si(2*np.mean(stars.mass),
2*np.mean(stars.radius))
kep = Kepler(unit_converter = SmallScaleConverter, redirection = 'none')
else:
kep = kepler_worker
# Selects the Stars to Become Planetary Systems
num_stars = len(stars)
if num_systems > num_stars:
num_systems = num_stars
select_stars_indices = random.sample(range(0, num_stars), num_systems)
# Sets Important Parameters
ID_Earth = 30000
ID_Jupiter = 50000
ID_Neptune = 80000
systems = datamodel.Particles()
# Begins to Build Planetary Systems According to Provided Information
for system in range(num_systems):
planets = datamodel.Particles()
j = select_stars_indices[system]
host_star = stars[j]
mu = constants.G*host_star.mass
if makeEarth:
period_ratio = np.sqrt((1.000 | units.AU)**3/(5.454 | units.AU)**3)
mass_E = 0.003 | units.MJupiter
init_a = util.calc_RelativePlanetPlacement(host_star, mass_E, period_ratio)
init_e = 0.016
Earth = planet_v2(ID_Earth+host_star.id, host_star, mass_E, init_a, init_e)
Earth.stellar_type = 1
planets.add_particle(Earth)
if makeJupiter:
init_a = util.calc_JovianPlacement(host_star)
init_e = 0.048
mass_J = 1 | units.MJupiter
Jupiter = planet_v2(ID_Jupiter+host_star.id, host_star, mass_J, init_a, init_e)
Jupiter.stellar_type = 1
planets.add_particle(Jupiter)
if makeTestPlanet:
init_a = util.calc_JovianPlacement(host_star)
init_e = 0.3
mass_J = 20 | units.MJupiter
TestP = planet_v2(ID_Jupiter+host_star.id, host_star, mass_J, init_a, init_e)
TestP.stellar_type = 1
planets.add_particle(TestP)
if makeNeptune:
period_ratio = np.sqrt((30.110 | units.AU)**3/(5.454 | units.AU)**3)
mass_N = 0.054 | units.MJupiter
init_a = util.calc_RelativePlanetPlacement(host_star, mass_N, period_ratio)
init_e = 0.009
Neptune = planet_v2(ID_Neptune+host_star.id, host_star, mass_N, init_a, init_e)
Neptune.stellar_type = 1
planets.add_particle(Neptune)
# Moves Planetary System to the Origin and Applies a Random Euler Rotation
for p in planets:
p.position = p.position - host_star.position
p.velocity = p.velocity - host_star.velocity
util.preform_EulerRotation(planets)
for p in planets:
p.position = p.position + host_star.position
p.velocity = p.velocity + host_star.velocity
# Ensures The Planets are Approaching
for p in planets:
host_star, p = util.ensure_approaching_binary(host_star, p, kepler_worker=kep)
if kepler_worker == None:
kep.stop()
# Adds the System to the Provided AMUSE Particle Set
systems.add_particles(planets)
return systems
def planet_v2(ID, host_star, planet_mass, init_a, init_e, random_orientation=False):
''' Creates a planet as an AMUSE Particle with provided characteristics.
ID: Identifying number unique to this planet.
host_star: The AMUSE Particle that is the host star for the planet.
planet_mass: The mass of the planet (in the nbody units).
init_a: Initial semi-major axis (in nbody units).
init_e: Initial eccentricity (in nbody units).
random_orientation: Boolean to incline the planet in a random fashion.
'''
# Define the Host Star's Original Location & Position
rCM = host_star.position
vCM = host_star.velocity
# Sets Planet Values to Provided Conditions
p = datamodel.Particle()
p.id = ID
p.type = "planet"
p.host_star = host_star.id
p.mass = planet_mass
# Sets the Dynamical Radius to the Hill Sphere Approx.
p.radius = util.calc_HillRadius(init_a, init_e, p.mass, host_star.mass)
# Generate a Random Position on the Orbit (True Anomaly)
# This ensures that all the planets don't start out along the same joining line.
init_ta = 360*np.random.random() | units.deg
# Get the Host Star & Planets Positions from Kepler Relative to the Origin
newPSystem = new_binary_from_orbital_elements(host_star.mass, p.mass, init_a,
eccentricity = init_e,
true_anomaly = init_ta,
G = constants.G)
# Rotate the Binary System & Move to the CoM's Position
if random_orientation:
util.preform_EulerRotation(newPSystem)
host_star.position = rCM + newPSystem[0].position
host_star.velocity = vCM + newPSystem[0].velocity
p.position = rCM + newPSystem[1].position
p.velocity = vCM + newPSystem[1].velocity
# Returns the Created AMUSE Particle
return p
def set_max_period_from_perturber(center_of_mass, particle_set, **kwargs):
perturb_limit = kwargs.get("perturbation_limit", 0.02)
e = kwargs.get("eccentricity", 0.0)
verbose = kwargs.get("verbose", False)
# Calculate Nearest Neighbors
other_stars = particle_set - center_of_mass
dist_vect = other_stars.position - center_of_mass.position
distances = dist_vect.lengths()
# Calculate Perterbation of Nearest Neighbors on System
pert = other_stars.mass / distances**3
primary_pert_index = np.where(pert == max(pert))[0]
primary_perturber = other_stars[primary_pert_index][0]
perturb_distance = distances[primary_pert_index][0]
perturb_mass = primary_perturber.mass
# Calculate Maximum Period
P_max_Squared = (4*np.power(np.pi, 2)*(perturb_distance**3)*perturb_limit)/(units.constants.G*((1+e)**3)*perturb_mass)
P_max = np.sqrt(P_max_Squared)[0]
if verbose:
print("Limiting Maximum Period to", P_max, "in accordance to the largest perturber at index", primary_pert_index)
print("Distance to Perturber:", perturb_distance, " | Mass of Perturber:", perturb_mass)
return P_max
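# Note on the relation above (a restatement, not new logic): requiring the
# relative tidal perturbation at apocenter to stay below perturb_limit,
#   (m_pert / d^3) * a^3 * (1 + e)^3 / M_com <= perturb_limit,
# combined with Kepler's third law P^2 = 4 pi^2 a^3 / (G M_com), gives
#   P_max^2 = 4 pi^2 d^3 perturb_limit / (G (1 + e)^3 m_pert).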
| mit |
bluesquall/okeanidanalysis | examples/timestamps.py | 1 | 2271 | #!/usr/bin/env python
import time, datetime, pytz
import numpy as np
import matplotlib as mpl
import matplotlib.dates
import okeanidanalysis as oa
def main(logfile):
if type(logfile) is not str: s = oa.logs.OkeanidLog(logfile.name)
else: s = oa.logs.OkeanidLog(logfile)
# LRAUV logs saved as `.mat` files in HDF5 format use the MATLAB datenum. The timezone is UTC, but that is not explicitly stated in the log file.
start_matlab_datenum = s['depth/time'][0][0]
# If you access a timeseries using the OkeanidLog.timeseries method, you will get a matplotlib datenum
depth, t_depth = s.timeseries('depth')
start_matplotlib_datenum = t_depth[0]
print('matplotlib datenum is {0} less than MATLAB datenum (should be 366)'.format(start_matlab_datenum-start_matplotlib_datenum))
# You can use methods in matplotlib.dates to convert easily to python datetime objects
python_datetime_depth = matplotlib.dates.num2date(t_depth, tz=pytz.UTC)
start_python_datetime = python_datetime_depth[0]
# or to epoch seconds
unix_epoch_depth = matplotlib.dates.num2epoch(t_depth)
start_unix_epoch = unix_epoch_depth[0]
# or to epoch milliseconds (i.e., mtime)
mtime_depth = np.rint(matplotlib.dates.num2epoch(t_depth) * 1e3).astype(np.uint64)
start_mtime = mtime_depth[0]
# or to epoch microseconds (i.e., utime)
utime_depth = np.rint(matplotlib.dates.num2epoch(t_depth) * 1e6).astype(np.uint64)
start_utime = utime_depth[0]
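    # Summary of the relationships exercised above (same logic, one place):
    #   matlab_datenum = matplotlib_datenum + 366          # [days]
    #   epoch_seconds  = matplotlib.dates.num2epoch(num)   # [seconds]
    #   mtime          = round(epoch_seconds * 1e3)        # [milliseconds]
    #   utime          = round(epoch_seconds * 1e6)        # [microseconds]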
msg = 'Depth record starts at:\n\t{0}\n\t{1} [Unix epoch seconds]\n\t{2} [Unix epoch milliseconds]\n\t{3} [Unix epoch microseconds]\n\t{4} [matplotlib days]\n\t{5} [matlab days]'
print(msg.format(start_python_datetime, start_unix_epoch, start_mtime, start_utime, start_matplotlib_datenum, start_matlab_datenum))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='explain timestamps in LRAUV log')
logfile = parser.add_argument('logfile', type=argparse.FileType('rb'),
help='the log to use in the example')
parser.add_argument('-V', '--version', action='version',
version='%(prog)s 0.0.1',
help='display version information and exit')
args = parser.parse_args()
main(**args.__dict__)
| mit |
syl20bnr/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_macosx.py | 69 | 15397 | from __future__ import division
import os
import numpy
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase, NavigationToolbar2
from matplotlib.cbook import maxdict
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.mathtext import MathTextParser
from matplotlib.colors import colorConverter
from matplotlib.widgets import SubplotTool
import matplotlib
from matplotlib.backends import _macosx
def show():
"""Show all the figures and enter the Cocoa mainloop.
This function will not return until all windows are closed or
the interpreter exits."""
# Having a Python-level function "show" wrapping the built-in
# function "show" in the _macosx extension module allows us to
# to add attributes to "show". This is something ipython does.
_macosx.show()
class RendererMac(RendererBase):
"""
The renderer handles drawing/rendering operations. Most of the renderer's
methods forwards the command to the renderer's graphics context. The
renderer does not wrap a C object and is written in pure Python.
"""
texd = maxdict(50) # a cache of tex image rasters
def __init__(self, dpi, width, height):
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
self.gc = GraphicsContextMac()
self.mathtext_parser = MathTextParser('MacOSX')
def set_width_height (self, width, height):
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_path(path, transform, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_markers(marker_path, marker_trans, path, trans, rgbFace)
def draw_path_collection(self, *args):
gc = self.gc
args = args[:13]
gc.draw_path_collection(*args)
def draw_quad_mesh(self, *args):
gc = self.gc
gc.draw_quad_mesh(*args)
def new_gc(self):
self.gc.reset()
return self.gc
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
im.flipud_out()
nrows, ncols, data = im.as_rgba_str()
self.gc.draw_image(x, y, nrows, ncols, data, bbox, clippath, clippath_trans)
im.flipud_out()
def draw_tex(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
        Z = self.texd.get(key)  # cache the rasterized TeX by content key
        if Z is None:
            Z = texmanager.get_grey(s, size, self.dpi)
            Z = numpy.array(255.0 - Z * 255.0, numpy.uint8)
            self.texd[key] = Z
        gc.draw_mathtext(x, y, angle, Z)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
size = prop.get_size_in_points()
ox, oy, width, height, descent, image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
gc.draw_mathtext(x, y, angle, 255 - image.as_array())
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
gc.draw_text(x, y, unicode(s), family, size, weight, style, angle)
def get_text_width_height_descent(self, s, prop, ismath):
if ismath=='TeX':
# TODO: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
m,n = Z.shape
# TODO: handle descent; This is based on backend_agg.py
return n, m, 0
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
return self.gc.get_text_width_height_descent(unicode(s), family, size, weight, style)
def flipy(self):
return False
def points_to_pixels(self, points):
return points/72.0 * self.dpi
def option_image_nocomposite(self):
return True
class GraphicsContextMac(_macosx.GraphicsContext, GraphicsContextBase):
"""
The GraphicsContext wraps a Quartz graphics context. All methods
are implemented at the C-level in macosx.GraphicsContext. These
methods set drawing properties such as the line style, fill color,
etc. The actual drawing is done by the Renderer, which draws into
the GraphicsContext.
"""
def __init__(self):
GraphicsContextBase.__init__(self)
_macosx.GraphicsContext.__init__(self)
def set_foreground(self, fg, isRGB=False):
if not isRGB:
fg = colorConverter.to_rgb(fg)
_macosx.GraphicsContext.set_foreground(self, fg)
def set_clip_rectangle(self, box):
GraphicsContextBase.set_clip_rectangle(self, box)
if not box: return
_macosx.GraphicsContext.set_clip_rectangle(self, box.bounds)
def set_clip_path(self, path):
GraphicsContextBase.set_clip_path(self, path)
if not path: return
path = path.get_fully_transformed_path()
_macosx.GraphicsContext.set_clip_path(self, path)
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For performance reasons, we don't want to redraw the figure after
each draw command. Instead, we mark the figure as invalid, so that
it will be redrawn as soon as the event loop resumes via PyOS_InputHook.
This function should be called after each draw event, even if
matplotlib is not running interactively.
"""
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.invalidate()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvasMac(figure)
manager = FigureManagerMac(canvas, num)
return manager
class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
Events such as button presses, mouse movements, and key presses
are handled in the C code and the base class methods
button_press_event, button_release_event, motion_notify_event,
key_press_event, and key_release_event are called from there.
"""
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
width, height = self.get_width_height()
self.renderer = RendererMac(figure.dpi, width, height)
_macosx.FigureCanvas.__init__(self, width, height)
def resize(self, width, height):
self.renderer.set_width_height(width, height)
dpi = self.figure.dpi
width /= dpi
height /= dpi
self.figure.set_size_inches(width, height)
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', **kwargs):
if dpi is None: dpi = matplotlib.rcParams['savefig.dpi']
filename = unicode(filename)
root, ext = os.path.splitext(filename)
ext = ext[1:].lower()
if not ext:
ext = "png"
filename = root + "." + ext
if ext=="jpg": ext = "jpeg"
# save the figure settings
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
# set the new parameters
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
if ext in ('jpeg', 'png', 'tiff', 'gif', 'bmp'):
width, height = self.figure.get_size_inches()
width, height = width*dpi, height*dpi
self.write_bitmap(filename, width, height)
elif ext == 'pdf':
self.write_pdf(filename)
elif ext in ('ps', 'eps'):
from backend_ps import FigureCanvasPS
# Postscript backend changes figure.dpi, but doesn't change it back
origDPI = self.figure.dpi
fc = self.switch_backends(FigureCanvasPS)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.dpi = origDPI
self.figure.set_canvas(self)
elif ext=='svg':
from backend_svg import FigureCanvasSVG
fc = self.switch_backends(FigureCanvasSVG)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.set_canvas(self)
else:
raise ValueError("Figure format not available (extension %s)" % ext)
# restore original figure settings
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
class FigureManagerMac(_macosx.FigureManager, FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
title = "Figure %d" % num
_macosx.FigureManager.__init__(self, canvas, title)
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbarMac(canvas)
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2Mac(canvas)
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
            if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
# This is ugly, but this is what tkagg and gtk are doing.
# It is needed to get ginput() working.
self.canvas.figure.show = lambda *args: self.show()
def show(self):
self.canvas.draw()
def close(self):
Gcf.destroy(self.num)
class NavigationToolbarMac(_macosx.NavigationToolbar):
def __init__(self, canvas):
self.canvas = canvas
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
images = {}
for imagename in ("stock_left",
"stock_right",
"stock_up",
"stock_down",
"stock_zoom-in",
"stock_zoom-out",
"stock_save_as"):
filename = os.path.join(basedir, imagename+".ppm")
images[imagename] = self._read_ppm_image(filename)
_macosx.NavigationToolbar.__init__(self, images)
self.message = None
def _read_ppm_image(self, filename):
data = ""
imagefile = open(filename)
for line in imagefile:
if "#" in line:
i = line.index("#")
line = line[:i] + "\n"
data += line
imagefile.close()
magic, width, height, maxcolor, imagedata = data.split(None, 4)
width, height = int(width), int(height)
assert magic=="P6"
assert len(imagedata)==width*height*3 # 3 colors in RGB
return (width, height, imagedata)
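    # For reference: a P6 file starts with an ASCII header such as
    # "P6 24 24 255" (magic, width, height, max color value), followed by
    # width*height*3 raw RGB bytes -- the five fields split apart above.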
def panx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.pan(direction)
self.canvas.invalidate()
def pany(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.pan(direction)
self.canvas.invalidate()
def zoomx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.zoom(direction)
self.canvas.invalidate()
def zoomy(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.zoom(direction)
self.canvas.invalidate()
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2):
def __init__(self, canvas):
NavigationToolbar2.__init__(self, canvas)
def _init_toolbar(self):
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
_macosx.NavigationToolbar2.__init__(self, basedir)
def draw_rubberband(self, event, x0, y0, x1, y1):
self.canvas.set_rubberband(x0, y0, x1, y1)
def release(self, event):
self.canvas.remove_rubberband()
def set_cursor(self, cursor):
_macosx.set_cursor(cursor)
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
def prepare_configure_subplots(self):
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasMac(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
return canvas
def set_message(self, message):
_macosx.NavigationToolbar2.set_message(self, message.encode('utf-8'))
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerMac
| gpl-3.0 |
mbaijal/incubator-mxnet | example/gluon/dcgan.py | 7 | 8812 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
import argparse
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd
import numpy as np
import logging
from datetime import datetime
import os
import time
def fill_buf(buf, i, img, shape):
    # Copy the i-th image into its tile slot of the mosaic buffer.
    # shape is the (height, width) of a single tile; images are square here.
    n = buf.shape[0]//shape[1]
    m = buf.shape[1]//shape[0]
    sx = (i%m)*shape[0]
    sy = (i//m)*shape[1]
    buf[sy:sy+shape[1], sx:sx+shape[0], :] = img
    return None
def visual(title, X, name):
    # Tile a batch of images into a single mosaic and save it to disk.
    assert len(X.shape) == 4
    X = X.transpose((0, 2, 3, 1))  # NCHW -> NHWC
    X = np.clip((X - np.min(X))*(255.0/(np.max(X) - np.min(X))), 0, 255).astype(np.uint8)
    n = np.ceil(np.sqrt(X.shape[0]))  # tiles per side of the (roughly) square grid
    buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
    for i, img in enumerate(X):
        fill_buf(buff, i, img, X.shape[1:3])
    buff = buff[:,:,::-1]  # flip the channel order (RGB <-> BGR)
    plt.imshow(buff)
    plt.title(title)
    plt.savefig(name)
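# Illustrative usage (names and sizes are assumptions, not part of this
# script): a batch shaped (64, 3, 64, 64) becomes an 8x8 mosaic PNG via
#   visual('fake', fake_batch, 'fake_epoch_0.png')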
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='cifar10', help='dataset to use. options are cifar10 and imagenet. note: the script below currently loads MNIST regardless.')
parser.add_argument('--batch-size', type=int, default=64, help='input batch size')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--nepoch', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='./results', help='folder to output images and model checkpoints')
parser.add_argument('--check-point', default=True, help="save results at each epoch or not")
opt = parser.parse_args()
print(opt)
logging.basicConfig(level=logging.DEBUG)
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = 3
if opt.cuda:
ctx = mx.gpu(0)
else:
ctx = mx.cpu()
check_point = bool(opt.check_point)
outf = opt.outf
if not os.path.exists(outf):
os.makedirs(outf)
def transformer(data, label):
# resize to 64x64
data = mx.image.imresize(data, 64, 64)
# transpose from (64, 64, 3) to (3, 64, 64)
data = mx.nd.transpose(data, (2,0,1))
# normalize to [-1, 1]
data = data.astype(np.float32)/128 - 1
# if image is greyscale, repeat 3 times to get RGB image.
if data.shape[0] == 1:
data = mx.nd.tile(data, (3, 1, 1))
return data, label
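# For intuition (worked example): a uint8 pixel of 0 maps to 0/128 - 1 = -1.0
# and 255 maps to 255/128 - 1 ~= 0.992, so inputs land roughly in [-1, 1],
# matching the tanh output range of the generator defined below.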
train_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=True, transform=transformer),
batch_size=opt.batch_size, shuffle=True, last_batch='discard')
val_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=False, transform=transformer),
batch_size=opt.batch_size, shuffle=False)
# build the generator
netG = nn.Sequential()
with netG.name_scope():
# input is Z, going into a convolution
netG.add(nn.Conv2DTranspose(ngf * 8, 4, 1, 0, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 4 x 4
netG.add(nn.Conv2DTranspose(ngf * 4, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
    # state size. (ngf*4) x 8 x 8
netG.add(nn.Conv2DTranspose(ngf * 2, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
    # state size. (ngf*2) x 16 x 16
netG.add(nn.Conv2DTranspose(ngf, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
    # state size. (ngf) x 32 x 32
netG.add(nn.Conv2DTranspose(nc, 4, 2, 1, use_bias=False))
netG.add(nn.Activation('tanh'))
# state size. (nc) x 64 x 64
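# A quick sanity check on the shape comments above, using the standard
# transposed-convolution arithmetic out = (in - 1)*stride - 2*pad + kernel
# (an assumption spelled out here, not part of the original script):
#   (1-1)*1 - 0 + 4 = 4,  (4-1)*2 - 2 + 4 = 8,  then 16, 32 and finally 64.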
# build the discriminator
netD = nn.Sequential()
with netD.name_scope():
# input is (nc) x 64 x 64
netD.add(nn.Conv2D(ndf, 4, 2, 1, use_bias=False))
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 32 x 32
netD.add(nn.Conv2D(ndf * 2, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
    # state size. (ndf*2) x 16 x 16
netD.add(nn.Conv2D(ndf * 4, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
    # state size. (ndf*4) x 8 x 8
netD.add(nn.Conv2D(ndf * 8, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
    # state size. (ndf*8) x 4 x 4
netD.add(nn.Conv2D(2, 4, 1, 0, use_bias=False))
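# The mirrored check for the discriminator, using the strided-convolution
# formula out = (in + 2*pad - kernel)//stride + 1:
#   (64+2-4)//2 + 1 = 32, then 16, 8, 4, and (4+0-4)//1 + 1 = 1,
# leaving a 2-channel real/fake score per image.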
# loss
loss = gluon.loss.SoftmaxCrossEntropyLoss()
# initialize the generator and the discriminator
netG.initialize(mx.init.Normal(0.02), ctx=ctx)
netD.initialize(mx.init.Normal(0.02), ctx=ctx)
# trainer for the generator and the discriminator
trainerG = gluon.Trainer(netG.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
# ============printing==============
real_label = mx.nd.ones((opt.batch_size,), ctx=ctx)
fake_label = mx.nd.zeros((opt.batch_size,), ctx=ctx)
metric = mx.metric.Accuracy()
print('Training... ')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
iter = 0
for epoch in range(opt.nepoch):
tic = time.time()
btic = time.time()
for data, _ in train_data:
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real_t
data = data.as_in_context(ctx)
noise = mx.nd.random.normal(0, 1, shape=(opt.batch_size, nz, 1, 1), ctx=ctx)
with autograd.record():
output = netD(data)
output = output.reshape((opt.batch_size, 2))
errD_real = loss(output, real_label)
metric.update([real_label,], [output,])
fake = netG(noise)
output = netD(fake.detach())
output = output.reshape((opt.batch_size, 2))
errD_fake = loss(output, fake_label)
errD = errD_real + errD_fake
errD.backward()
metric.update([fake_label,], [output,])
trainerD.step(opt.batch_size)
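        # Note: fake.detach() above blocks gradients from flowing into the
        # generator during the discriminator step, so each network is updated
        # against a frozen copy of the other -- the standard alternating
        # GAN update.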
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
with autograd.record():
output = netD(fake)
output = output.reshape((-1, 2))
errG = loss(output, real_label)
errG.backward()
trainerG.step(opt.batch_size)
name, acc = metric.get()
# logging.info('speed: {} samples/s'.format(opt.batch_size / (time.time() - btic)))
logging.info('discriminator loss = %f, generator loss = %f, binary training acc = %f at iter %d epoch %d' %(mx.nd.mean(errD).asscalar(), mx.nd.mean(errG).asscalar(), acc, iter, epoch))
        if iter % 1 == 0:  # saves images every iteration; raise the modulus to save less often
visual('gout', fake.asnumpy(), name=os.path.join(outf,'fake_img_iter_%d.png' %iter))
visual('data', data.asnumpy(), name=os.path.join(outf,'real_img_iter_%d.png' %iter))
iter = iter + 1
btic = time.time()
name, acc = metric.get()
metric.reset()
logging.info('\nbinary training acc at epoch %d: %s=%f' % (epoch, name, acc))
logging.info('time: %f' % (time.time() - tic))
if check_point:
netG.save_parameters(os.path.join(outf,'generator_epoch_%d.params' %epoch))
netD.save_parameters(os.path.join(outf,'discriminator_epoch_%d.params' % epoch))
netG.save_parameters(os.path.join(outf, 'generator.params'))
netD.save_parameters(os.path.join(outf, 'discriminator.params'))
| apache-2.0 |
meteorcloudy/tensorflow | tensorflow/examples/get_started/regression/test.py | 41 | 4037 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple smoke test that runs these examples for 1 training iteration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import pandas as pd
from six.moves import StringIO
import tensorflow.examples.get_started.regression.imports85 as imports85
sys.modules["imports85"] = imports85
# pylint: disable=g-bad-import-order,g-import-not-at-top
import tensorflow.contrib.data as data
import tensorflow.examples.get_started.regression.dnn_regression as dnn_regression
import tensorflow.examples.get_started.regression.linear_regression as linear_regression
import tensorflow.examples.get_started.regression.linear_regression_categorical as linear_regression_categorical
import tensorflow.examples.get_started.regression.custom_regression as custom_regression
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
# pylint: disable=g-bad-import-order,g-import-not-at-top
# pylint: disable=line-too-long
FOUR_LINES = "\n".join([
"1,?,alfa-romero,gas,std,two,hatchback,rwd,front,94.50,171.20,65.50,52.40,2823,ohcv,six,152,mpfi,2.68,3.47,9.00,154,5000,19,26,16500",
"2,164,audi,gas,std,four,sedan,fwd,front,99.80,176.60,66.20,54.30,2337,ohc,four,109,mpfi,3.19,3.40,10.00,102,5500,24,30,13950",
"2,164,audi,gas,std,four,sedan,4wd,front,99.40,176.60,66.40,54.30,2824,ohc,five,136,mpfi,3.19,3.40,8.00,115,5500,18,22,17450",
"2,?,audi,gas,std,two,sedan,fwd,front,99.80,177.30,66.30,53.10,2507,ohc,five,136,mpfi,3.19,3.40,8.50,110,5500,19,25,15250",])
# pylint: enable=line-too-long
def four_lines_dataframe():
text = StringIO(FOUR_LINES)
return pd.read_csv(text, names=imports85.types.keys(),
dtype=imports85.types, na_values="?")
def four_lines_dataset(*args, **kwargs):
del args, kwargs
return data.Dataset.from_tensor_slices(FOUR_LINES.split("\n"))
class RegressionTest(googletest.TestCase):
"""Test the regression examples in this directory."""
@test.mock.patch.dict(data.__dict__,
{"TextLineDataset": four_lines_dataset})
@test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)})
@test.mock.patch.dict(linear_regression.__dict__, {"STEPS": 1})
def test_linear_regression(self):
linear_regression.main([""])
@test.mock.patch.dict(data.__dict__,
{"TextLineDataset": four_lines_dataset})
@test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)})
@test.mock.patch.dict(linear_regression_categorical.__dict__, {"STEPS": 1})
def test_linear_regression_categorical(self):
linear_regression_categorical.main([""])
@test.mock.patch.dict(data.__dict__,
{"TextLineDataset": four_lines_dataset})
@test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)})
@test.mock.patch.dict(dnn_regression.__dict__, {"STEPS": 1})
def test_dnn_regression(self):
dnn_regression.main([""])
@test.mock.patch.dict(data.__dict__, {"TextLineDataset": four_lines_dataset})
@test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)})
@test.mock.patch.dict(custom_regression.__dict__, {"STEPS": 1})
def test_custom_regression(self):
custom_regression.main([""])
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
fberanizo/sin5016 | tests/test_sym5.py | 1 | 7836 | # -*- coding: utf-8 -*-
from context import svm, mlp
import unittest, numpy
from os import listdir
from os.path import isfile, join
from scipy.io import loadmat
from sklearn.decomposition import PCA
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score, StratifiedKFold
from sklearn.metrics import accuracy_score, classification_report
class SYM5TestSuite(unittest.TestCase):
"""Suíte de testes para o conjunto de dados SDUMLA-HTM utilizando wavelet Symlets."""
def __init__(self, *args, **kwargs):
super(SYM5TestSuite, self).__init__(*args, **kwargs)
X, y = self.read_dataset()
self.n_datasets = 10
self.X_train, self.X_test, self.y_train, self.y_test = [None]*self.n_datasets, [None]*self.n_datasets, [None]*self.n_datasets, [None]*self.n_datasets
self.X_train_PCA, self.X_test_PCA = [None]*self.n_datasets, [None]*self.n_datasets
        # Split the dataset into 10 subsets, ~840 samples from 10 classes each
        print("Splitting the dataset into 10 subsets...")
for i in range(self.n_datasets):
begin = i * 840
end = begin + 840
            # 25% of each subset is held out for testing
self.X_train[i], self.X_test[i], self.y_train[i], self.y_test[i] = train_test_split(X[begin:end,:], y[begin:end], test_size=0.25)
            # Apply PCA to reduce the data dimensionality until the explained
            # variance exceeds 0.9. Used only for the MLP.
pca = PCA(n_components=0.9)
self.X_train_PCA[i] = pca.fit_transform(self.X_train[i])
self.X_test_PCA[i] = pca.transform(self.X_test[i])
def test_sym5(self):
"""Lê parâmetros, treina e testa modelos."""
k, clf1, clf2, clf3 = 3, [], [], []
        # Train the classifiers on each of the 10 datasets
for i in range(self.n_datasets):
print("Treinando conjunto de dados %d de %d" % (i+1, self.n_datasets))
clf1.append(self.train_svm_linear(self.X_train[i], self.y_train[i]))
clf2.append(self.train_svm_rbf(self.X_train[i], self.y_train[i]))
clf3.append(self.train_mlp(self.X_train_PCA[i], self.y_train[i]))
joblib.dump(clf1[i], 'trained-estimators/sym5-3-LL/svm-linear-'+str(i+1)+'.pkl')
joblib.dump(clf2[i], 'trained-estimators/sym5-3-LL/svm-rbf-'+str(i+1)+'.pkl')
joblib.dump(clf3[i], 'trained-estimators/sym5-3-LL/mlp-'+str(i+1)+'.pkl')
#y_pred = classifier.predict(X_test)
#print(classification_report(y_test, y_pred))
        # Friedman test
#clf1.append(joblib.load('trained-estimators/sym5-3-LL/svm-linear-0.pkl'))
#clf2.append(joblib.load('trained-estimators/sym5-3-LL/svm-rbf-0.pkl'))
#clf3.append(joblib.load('trained-estimators/sym5-3-LL/mlp0.pkl'))
rank = []
for i in range(self.n_datasets):
            # Rank the models by test accuracy on each dataset
rank.append(sorted([(1, clf1[i].score(self.X_test[i], self.y_test[i])), \
(2, clf2[i].score(self.X_test[i], self.y_test[i])), \
(3, clf3[i].score(self.X_test_PCA[i], self.y_test[i]))], key=lambda t: t[1], reverse=True))
        rank = numpy.array(list(map(lambda r: [r[0][0], r[1][0], r[2][0]], rank)))  # list() keeps this Python 3 compatible
        # Compute the mean rank per model
        rj = numpy.mean(rank, axis=0)
        print("Mean rank for SVM Linear, SVM RBF and MLP: %s" % rj)
rmean = rank.mean()
        sst = self.n_datasets * ((rj - rmean)**2).sum()
        sse = 1.0/(self.n_datasets*(k-1)) * ((rank-rmean)**2).sum()
        # Compute the test statistic
chi_square = sst/sse
print("chi_square = %f" % chi_square)
        # for k=3 and N=5, p-value < 0.05 requires chi^2 > 6.4
        # (note: n_datasets is 10 here, so the critical value differs)
assert True
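    # A minimal worked example of the statistic above (toy ranks, not from
    # this experiment): with rank = [[1, 2, 3], [1, 2, 3], [2, 1, 3]]
    # (N=3 datasets, k=3 models), rmean = 2, rj = [4/3, 5/3, 3],
    # sst = 3*((2/3)**2 + (1/3)**2 + 1**2) ~= 4.67,
    # sse = 1/(3*2) * sum((rank - 2)**2) = 6/6 = 1.0,
    # so chi_square = sst/sse ~= 4.67.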
def read_dataset(self):
"""Lê o conjunto de dados e o divide em 5 partes."""
# O usuário deve definir três parâmetros, a saber:
# Nível de decomposição (1, 2 ou 3)
# Função wavelet mãe (db2, db4, sym3, sym4, sym5)
# Qual(is) sub-banda(s) utilizar (LL, HL, LH, HH)
path = join('/', 'home', 'fabio', 'imagens_clodoaldo', 'Wavelet')#input('Diretório com o conjunto de dados pré-processado (com os arquivos *.mat): ')
level = '3'#raw_input('Nível de decomposição (1, 2 ou 3): ')
wavelet = 'sym5'#raw_input('Função wavelet mãe (db2, db4, sym3, sym4, sym5): ')
band = 'LL'#raw_input('Qual sub-banda) utilizar (LL, HL, LH, HH): ')
band_dict = {'LL':0, 'HL':1, 'LH':2, 'HH':3}
        # Read the dataset directory
path = join(path, wavelet)
files = [f for f in listdir(path) if isfile(join(path, f))]
files = sorted(files, key=lambda file: int(file.split('.')[0][1:]))
X, y = [], []
print("Lendo arquivos *.mat...")
for file in files:
            try:
                #print('Reading file %s' % file)
                dataset = loadmat(join(path, file))
            except Exception:
                # skip files that cannot be read as MATLAB data
                continue
            else:
                # 'else' (not 'finally') so this only runs after a successful
                # load; 'finally' would reference an undefined 'dataset' here.
                # dataset['coef'][0][0][SUB-BAND][0,LEVEL], SUB-BANDS = [0..3] (LL, LH, HL, HH)
                data = numpy.ravel(dataset['coef'][0][0][band_dict[band]][0,int(level)-1])
                X.append(data)
                y.append(int(file.split('.')[0][1:]))
X, y = numpy.array(X), numpy.array(y)
return X, y
def train_svm_linear(self, X, y):
"""Treina um SVM Linear e retorna o classificador treinado."""
clf = svm.SVM(kernel='linear')
grid = {'C': [1]}
# Realiza busca em grid de parâmetros com 5x2 Fold cross-validation
skf_inner = StratifiedKFold(n_splits=2)
skf_outer = StratifiedKFold(n_splits=5)
# Otimiza parâmetros (2-fold)
clf = GridSearchCV(estimator=clf, param_grid=grid, cv=skf_inner, verbose=0, n_jobs=2)
clf.fit(X, y)
# Validação com parâmetros ótimos de treino (5-fold)
validation_score = cross_val_score(clf, X=X, y=y, cv=skf_outer, verbose=0, n_jobs=1)
print("SVM Linear - Acurácia de validação = %f" % validation_score.mean())
return clf
def train_svm_rbf(self, X, y):
"""Treina um SVM RBF e retorna o classificador treinado."""
clf = svm.SVM(kernel='rbf')
grid = {'C': [1], 'gamma': [0]}
# Realiza busca em grid de parâmetros com 5x2 Fold cross-validation
skf_inner = StratifiedKFold(n_splits=2)
skf_outer = StratifiedKFold(n_splits=5)
# Otimiza parâmetros (2-fold)
clf = GridSearchCV(estimator=clf, param_grid=grid, cv=skf_inner, verbose=0, n_jobs=2)
clf.fit(X, y)
# Validação com parâmetros ótimos de treino (5-fold)
validation_score = cross_val_score(clf, X=X, y=y, cv=skf_outer, verbose=0, n_jobs=1)
print("SVM RBF - Acurácia de validação = %f" % validation_score.mean())
return clf
def train_mlp(self, X, y):
"""Treina MLP e retorna o classificador treinado."""
clf = mlp.MLP()
grid = {'hidden_layer_size': [15]}
# Realiza busca em grid de parâmetros com 5x2 Fold cross-validation
skf_inner = StratifiedKFold(n_splits=2)
skf_outer = StratifiedKFold(n_splits=5)
# Otimiza parâmetros (2-fold)
clf = GridSearchCV(estimator=clf, param_grid=grid, cv=skf_inner, verbose=0, n_jobs=2)
clf.fit(X, y)
# Validação com parâmetros ótimos de treino (5-fold)
validation_score = cross_val_score(clf, X=X, y=y, cv=skf_outer, verbose=0, n_jobs=1)
print("MLP - Acurácia de validação = %f" % validation_score.mean())
return clf
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
xuewei4d/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 23 | 2260 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequence of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
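# For intuition (illustrative, not part of the exercise): with
# analyzer='char' and ngram_range=(1, 3), the string "ab" yields the
# features 'a', 'b' and 'ab', while "abc" adds 'c', 'bc' and 'abc'.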
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import matplotlib.pyplot as plt
#plt.matshow(cm, cmap=plt.cm.jet)
#plt.show()
# Predict the result on some short new sentences:
sentences = [
'This is a language detection test.',
'Ceci est un test de d\xe9tection de la langue.',
'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print('The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
Windy-Ground/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
seckcoder/lang-learn | python/sklearn/sklearn/tests/test_pipeline.py | 1 | 9726 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import BaseEstimator, clone
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition.pca import PCA, RandomizedPCA
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
class IncorrectT(BaseEstimator):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
""" Test the various init parameters of the pipeline.
"""
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
    assert_raises(TypeError, Pipeline,
                  [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
    # Check that apart from estimators, the parameters are the same
    params = pipe.get_params()
    params2 = pipe2.get_params()
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
""" Test the various methods of the pipeline (anova).
"""
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
"""Test that the pipeline can take fit parameters
"""
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_methods_pca_svm():
"""Test the various methods of the pipeline (pca + svm)."""
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
"""Test the various methods of the pipeline (preprocessing + svm)."""
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True)
    for preprocessing in [scaler, pca]:
        pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("pca", pca), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different pca object to control the random_state stream
fs = FeatureUnion([("pca", pca), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
    # Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_feature_names():
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
| unlicense |
cauchycui/scikit-learn | sklearn/utils/testing.py | 47 | 23587 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import sklearn
from sklearn.base import BaseEstimator
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but lets keep the bacward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
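# Illustrative usage (the warning text is an arbitrary example):
#   assert_warns(UserWarning, warnings.warn, "this will be deprecated")
# returns the wrapped call's result once a warning of the given class
# has been observed.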
def assert_warns_message(warning_class, message, func, *args, **kw):
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
        The expected exception class or classes
    func : callable
        Callable object expected to raise the error
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
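# Illustrative usage (``f`` is a hypothetical function, not part of this
# module):
#   def f(x):
#       raise ValueError("x must be positive")
#   assert_raise_message(ValueError, "must be positive", f, -1)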
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
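# Illustrative usage (array shapes and names are assumptions):
#   import numpy as np
#   fake_mldata({'data': np.zeros((5, 2)), 'label': np.arange(5.)},
#               'some-dataset', '/tmp/some-dataset.mat')
# writes a MATLAB file laid out the way the mldata mocks below expect it.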
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and
        cannot be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
iledarn/addons-yelizariev | import_framework/import_base.py | 16 | 14556 | # -*- coding: utf-8 -*-
import mapper
try:
from pandas import DataFrame
except ImportError:
pass
import logging
_logger = logging.getLogger(__name__)
class create_childs(object):
def __init__(self, childs):
# extend childs to same set of fields
# collect fields
fields = set()
for c in childs:
for f in c:
fields.add(f)
# extend childs
for c in childs:
for f in fields:
if f not in c:
c[f] = mapper.const('')
self.childs = childs
def get_childs(self):
return self.childs
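# Illustrative sketch (added comment; the field names are hypothetical):
# create_childs pads every child dict to the same key set, filling missing
# entries with mapper.const('').
#
#     childs = create_childs([{'id': mapper.value('id')},
#                             {'name': mapper.value('name')}])
#     extended = childs.get_childs()  # both dicts now carry 'id' and 'name'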
class import_base(object):
def __init__(self, pool, cr, uid,
instance_name,
module_name,
email_to_notify=False,
import_dir = '/tmp/', # path to save *.csv files for debug or manual upload
run_import = True,
context=None):
#Thread.__init__(self)
self.import_options = {'quoting':'"', 'separator':',', 'headers':True}
self.external_id_field = 'id'
self.pool = pool
self.cr = cr
self.uid = uid
self.instance_name = instance_name
self.module_name = module_name
self.context = context or {}
self.email = email_to_notify
self.table_list = []
#self.logger = logging.getLogger(module_name)
self.cache = {}
self.import_dir = import_dir
self.run_import = run_import
self.import_num = 1
self.initialize()
def initialize(self):
"""
        initialization before the import;
        usually used for logging in
"""
pass
def finalize(self):
"""
        cleanup after the import
"""
pass
def init_run(self):
"""
        called after initialize; runs in the thread, not in the main process.
        To be used for long initialization operations
"""
pass
def get_data(self, table):
"""
@return: a list of dictionaries
        each dictionary contains the list of pairs external_field_name : value
"""
return [{}]
def get_link(self, from_table, ids, to_table):
"""
        @return: a dictionary that contains the association between the id (from_table)
        and the list of linked ids (to_table)
"""
return {}
def get_external_id(self, data):
"""
        @return: the external id
        the default implementation returns data[self.external_id_field] ('id' by default);
        if the name of the id field is different, you can override this method or change the value
        of self.external_id_field
"""
return data[self.external_id_field]
def get_mapping(self):
"""
@return: { TABLE_NAME : {
'model' : 'openerp.model.name',
            # if True, import the table; if False, only resolve its dependencies
            # (useful for meta packages); defaults to True
            # Not required
            'import' : True or False,
            # Not required
            'dependencies' : [TABLE_1, TABLE_2],
            # Not required
            'hook' : self.function_name, # receives the val dict of the object; returns the (possibly modified) val dict, or False to skip the record
            'map' : { @see mapper
                'openerp_field_name' : 'external_field_name', or val('external_field_name')
                'openerp_field_id/id' : ref(TABLE_1, 'external_id_field'), # map the external id to the xml id on the right
                'openerp_field2_id/id_parent' : ref(TABLE_1,'external_id_field') # indicates a self-dependency on openerp_field2_id
                'state' : map_val('state_equivalent_field', mapping), # use get_state_map to map the external field's value onto the state value
                'text_field' : concat('field_1', 'field_2', .., delimiter=':'), # concatenate the values of the listed fields into one
                'description_field' : ppconcat('field_1', 'field_2', .., delimiter='\n\t'), # same as above but with prettier formatting
                'field' : call(callable, arg1, arg2, ..), # call the function with all the values; the function should return the value: self.callable
                'field' : callable
                'field' : call(method, val('external_field')), # the interface of method is (self, val) where val is the value of the field
                'field' : const(value) # always set this field to value
+ any custom mapper that you will define
}
},
}
"""
return {}
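    # Illustrative sketch (added comment; table, column and helper names are
    # hypothetical). Note that run()/prepare_mapping()/map_and_import() in this
    # module actually iterate a *list* of table dicts keyed by 'name', where
    # 'table' is a callable returning a pandas DataFrame and 'models' lists the
    # target models with their 'fields' mapping:
    #
    #     return [{
    #         'name': 'partners',
    #         'dependencies': [],
    #         'table': self.get_partners_dataframe,  # -> pandas.DataFrame
    #         'models': [{
    #             'model': 'res.partner',
    #             'fields': {
    #                 'id': mapper.value('external_id'),
    #                 'name': 'partner_name',  # sugar for mapper.value(...)
    #             },
    #         }],
    #     }]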
def default_hook(self, val):
"""
        this hook will be applied to each table that doesn't have a hook;
here we define the identity hook
"""
return val
def hook_ignore_all(self, *args):
# for debug
return None
def get_hook_ignore_empty(self, *args):
def f(external_values):
ignore = True
for key in args:
v = (external_values.get(key) or '').strip()
if v:
ignore = False
break
if ignore:
return None
else:
return external_values
return f
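    # Illustrative usage sketch (added comment; column names are hypothetical):
    # skip rows where both columns are empty.
    #
    #     hook = self.get_hook_ignore_empty('first_name', 'last_name')
    #     hook({'first_name': ' ', 'last_name': ''})    # -> None (row skipped)
    #     hook({'first_name': 'Ann', 'last_name': ''})  # -> the dict itself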
def prepare_mapping(self, mapping):
res = {}
for m in mapping:
res[m['name']] = m
return res
def run(self):
self.mapped = set()
self.mapping = self.prepare_mapping(self.get_mapping())
self.resolve_dependencies([k for k in self.mapping])
_logger.info('finalize...')
self.finalize()
_logger.info('finalize done')
def _fix_size_limit(self):
import sys
import csv
maxInt = sys.maxsize
decrement = True
while decrement:
# decrease the maxInt value by factor 10
# as long as the OverflowError occurs.
decrement = False
try:
csv.field_size_limit(maxInt)
except OverflowError:
maxInt = int(maxInt/10)
decrement = True
def do_import(self, import_list, context):
self._fix_size_limit()
# import
import_obj = self.pool['base_import.import']
for imp in import_list:
try:
messages = import_obj.do(self.cr, self.uid,
imp.get('id'), imp.get('fields'),
self.import_options, context=context)
_logger.info('import_result:\n%s'%messages)
except Exception as e:
import traceback
import StringIO
sh = StringIO.StringIO()
traceback.print_exc(file=sh)
error = sh.getvalue()
error = "Error during import\n%s\n%s" % (imp, error)
_logger.error(error)
raise Exception(error)
self.cr.commit()
def resolve_dependencies(self, deps):
import_list = []
for dname in deps:
if dname in self.mapped:
continue
self.mapped.add(dname)
mtable = self.mapping.get(dname)
if not mtable:
_logger.error('no mapping found for %s' % dname)
continue
self.resolve_dependencies(mtable.get('dependencies', []))
self.map_and_import(mtable)
def map_and_import(self, mtable):
_logger.info('read table %s' % mtable.get('name'))
records = mtable.get('table')()
for mmodel in mtable.get('models'):
split = mmodel.get('split')
if not split:
_logger.info('map and import: import-%s' % self.import_num)
self.map_and_import_batch(mmodel, records)
else:
i=0
while True:
_logger.info('importing batch # %s (import-%s)' % (i,self.import_num))
rr = records[i*split:(i+1)*split]
if len(rr):
self.map_and_import_batch(mmodel, rr)
i += 1
else:
break
finalize = mmodel.get('finalize')
if finalize:
_logger.info('finalize model...')
finalize()
_logger.info('finalize model done')
def map_and_import_batch(self, mmodel, records):
import_list = self.do_mapping(records, mmodel)
context = mmodel.get('context')
if context:
context = context()
self.do_import(import_list, context)
def do_mapping(self, records, mmodel):
hook = mmodel.get('hook', self.default_hook)
res = []
mfields = self._preprocess_mapping(mmodel.get('fields'))
_logger.info('mapping records to %s: %s' %( mmodel.get('model'), len(records)))
for key, r in records.iterrows():
hooked = hook(dict(r))
if not isinstance(hooked, list):
hooked = [hooked]
for dict_sugar in hooked:
if dict_sugar:
fields, values_list = self._fields_mapp(dict_sugar, mfields)
res.extend(values_list)
if not res:
_logger.info("no records to import")
return []
res = DataFrame(res)
data_binary = res.to_csv(sep=self.import_options.get('separator'),
quotechar=self.import_options.get('quoting'),
index=False,
header = fields,
encoding='utf-8'
)
if self.import_dir:
file_name = '%s/import-%03d-%s.csv' % (
self.import_dir,
self.import_num,
mmodel.get('model'),
)
with open(file_name, 'w') as f:
f.write(data_binary)
self.import_num += 1
if not self.run_import:
return []
id = self.pool['base_import.import'].create(self.cr, self.uid,
{'res_model':mmodel.get('model'),
'file': data_binary,
'file_name': mmodel.get('model'),
})
return [{'id':id, 'fields':fields}]
def _preprocess_mapping(self, mapping):
"""
        Preprocess the mapping:
        after preprocessing, every value
        of the dictionary is callable.
        Used to allow syntactic sugar like 'field': 'external_field'
        instead of 'field' : value('external_field')
"""
#m = dict(mapping)
m = mapping
for key, value in m.items():
if isinstance(value, basestring):
m[key] = mapper.value(value)
#set parent for instance of dbmapper
elif isinstance(value, mapper.dbmapper):
value.set_parent(self)
elif isinstance(value, create_childs):
# {'child_ids':[{'id':id1, 'name':name1}, {'id':id2, 'name':name2}]}
# ->
# {'child_ids/id':[id1, id2], 'child_ids/name': [name1, name2]}
for c in value.get_childs():
self._preprocess_mapping(c)
for ckey, cvalue in c.items():
new_key = '%s/%s' % (key, ckey)
if new_key not in m:
m[new_key] = []
m[new_key].append(cvalue)
del m[key] # delete 'child_ids'
return m
def _fields_mapp(self,dict_sugar, openerp_dict):
"""
{'name': name0, 'child_ids/id':[id1, id2], 'child_ids/name': [name1, name2]} ->
fields =
['name', 'child_ids/id', 'child_ids/name']
res = [
[name0, '',''], # i=-1
['', id1, name1] # i=0
['', id2, name2] # i=1
]
"""
res = []
i = -1
while True:
fields=[]
data_lst = []
for key,val in openerp_dict.items():
if key not in fields:
fields.append(key)
if isinstance(val, list) and len(val)>i and i>=0:
value = val[i](dict_sugar)
elif not isinstance(val, list) and i==-1:
value = val(dict_sugar)
else:
value = ''
data_lst.append(value)
if any(data_lst):
add = True
if i>=0:
                    _logger.debug('_fields_mapp %s', zip(fields, data_lst))
add = False
# ignore empty lines
for pos, val in enumerate(data_lst):
if fields[pos].endswith('/id'):
continue
if val:
add = True
break
if add:
res.append(data_lst)
i += 1
else:
break
return fields, res
def xml_id_exist(self, table, external_id):
"""
        Check if the external id exists in the openerp database.
        In order to check whether the id exists, the table it comes
        from should be provided.
        @return: the generated xml_id if the external_id exists in the database, or False
"""
if not external_id:
return False
xml_id = self._generate_xml_id(external_id, table)
id = self.pool.get('ir.model.data').search(self.cr, self.uid, [('name', '=', xml_id), ('module', '=', self.module_name)])
return id and xml_id or False
def _generate_xml_id(self, name, table):
"""
        @param name: name of the object; has to be unique for a given table
        @param table: table the record we want to generate comes from
        @return: a unique xml id for the record; the xml_id will be the same given the same table and same name.
        To be used to avoid duplication of data that doesn't have ids
"""
sugar_instance = self.instance_name
name = name.replace('.', '_').replace(',', '_')
return sugar_instance + "_" + table + "_" + name
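    # Illustrative sketch (added comment; instance and table names are
    # hypothetical): with instance_name='sugarcrm', dots and commas in the
    # name are replaced so the generated xml id stays stable and valid.
    #
    #     self._generate_xml_id('john.doe', 'contacts')
    #     # -> 'sugarcrm_contacts_john_doe'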
| lgpl-3.0 |
florian-f/sklearn | examples/manifold/plot_compare_methods.py | 4 | 3593 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space. Unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = pl.figure(figsize=(15, 8))
pl.suptitle("Manifold Learning with %i points, %i neighbors"
            % (n_points, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(241, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=pl.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(241, projection='3d')
pl.scatter(X[:, 0], X[:, 2], c=color, cmap=pl.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(242 + i)
pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral)
pl.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(246)
pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral)
pl.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(247)
pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral)
pl.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(248)
pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral)
pl.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
pl.show()
| bsd-3-clause |
michaelpacer/pyhawkes | data/chalearn/make_figure.py | 2 | 1781 |
import cPickle
import os
import gzip
import numpy as np
import matplotlib.pyplot as plt
from hips.plotting.layout import create_figure
from hips.plotting.colormaps import harvard_colors
def make_figure_a(S, F, C):
"""
Plot fluorescence traces, filtered fluorescence, and spike times
for three neurons
"""
col = harvard_colors()
dt = 0.02
T_start = 0
T_stop = 1 * 50 * 60
t = dt * np.arange(T_start, T_stop)
ks = [0,1]
nk = len(ks)
fig = create_figure((3,3))
for ind,k in enumerate(ks):
ax = fig.add_subplot(nk,1,ind+1)
        ax.plot(t, F[T_start:T_stop, k], color=col[1], label="$F$") # Plot the raw fluorescence in blue
        ax.plot(t, C[T_start:T_stop, k], color=col[0], lw=1.5, label="$\widehat{F}$") # Plot the filtered fluorescence in red
spks = np.where(S[T_start:T_stop, k])[0]
ax.plot(t[spks], C[spks,k], 'ko', label="S") # Plot the spike times in black
# Make a legend
if ind == 0:
# Put a legend above
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.,
prop={'size':9})
# Add labels
ax.set_ylabel("$F_%d(t)$" % (k+1))
if ind == nk-1:
ax.set_xlabel("Time $t$ [sec]")
# Format the ticks
ax.set_ylim([-0.1,1.0])
plt.locator_params(nbins=5, axis="y")
plt.subplots_adjust(left=0.2, bottom=0.2)
fig.savefig("figure3a.pdf")
plt.show()
data_path = os.path.join("data", "chalearn", "small", "network1_oopsi.pkl.gz")
with gzip.open(data_path, 'r') as f:
P, F, Cf, network, pos = cPickle.load(f)
S_full = (P > 0.1).astype(np.int)
make_figure_a(S_full, F, Cf) | mit |
kaivalyar/Sensei | Game-Network/neuralnetwork.py | 1 | 8771 | import numpy as np
import pandas as pd
import math
import time
import argparse
from random import randint
from random import seed
def main():
#g = Graph()
np.random.seed(0)
seed(0)
parser = argparse.ArgumentParser()
parser.add_argument('--training-data-size', type=int, default=10000)
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--batch-size', type=int, default=100)
parser.add_argument('--alpha', type=float, default=1)
#parser.add_argument('--hidden-layers', type=list, default=[5])
parser.add_argument('--inputs', type=int, default=5)
parser.add_argument('--outputs', type=int, default=2)
parser.add_argument('--comment', default='')
args = parser.parse_args()
test(inp=args.inputs, outp=args.outputs, data_size=args.training_data_size, in_epochs=args.epochs, batch_size=args.batch_size, in_alpha=args.alpha, comment=args.comment)
def test(inp, outp, data_size, in_epochs, batch_size, in_alpha, shape=[5], comment=''):
#inp = 5
size = shape#[5]
#data_size = 10000
#in_epochs = 20
#batch_size = 100
#in_alpha = 1
g = Graph(n=inp, m=size, p=outp)
data = pd.read_csv('train.csv')
#data['xdiff'] = data['cx']-data['tx']
#data['ydiff'] = data['cy']-data['ty']
data = data.head(data_size)
tdata = data[['cx','cy', 'tx', 'ty', 'angle']]#, 'xdiff', 'ydiff']]
training_data = tdata.as_matrix()#columns=data.columns[])
    print('shape of network is {} * {} * {}'.format(inp, size, outp))
print('training with a total of {} data rows for {} epochs, with {} data rows per batch, and an alpha value of {}'.format(data_size, in_epochs, batch_size, in_alpha))
training_outputs = data.as_matrix(columns=data.columns[5:7])
time1 = time.time()
g.train(training_data, training_outputs, activation_function=Graph.sigmoid, d_activation_function=Graph.d_sigmoid, epochs=in_epochs, batch=batch_size, alpha=in_alpha, dots=False, display=0)
time2 = time.time()
print('time taken to train = {} seconds'.format(round(time2-time1, 3)))
test_data = pd.read_csv('test.csv')
#test_data['xdiff'] = test_data['cx']-test_data['tx']
#test_data['ydiff'] = test_data['cy']-test_data['ty']
tdata = test_data[['cx','cy', 'tx', 'ty', 'angle']]#, 'xdiff', 'ydiff']]
testing_data = tdata.as_matrix()#columns=data.columns[])
time11 = time.time()
model = g.predict(testing_data)
time21 = time.time()
print('time taken to run trained model = {} seconds'.format(round(time21-time11, 3)))
data = test_data
count = 0
count_chit = 0
count_whit = 0
count_cmiss = 0
count_wmiss = 0
for i in range(len(model)):
d1 = data['hit'][i]
d2 = data['miss'][i]
m1 = model[i][0]
m2 = model[i][1]
res = 'hit (Actual)'
if (d1 == 0):
res = 'miss (Actual)'
pred = 'hit'
if m2 > m1:
pred = 'miss'
if pred[0:3] != res[0:3]:
res = '(WRONG PREDICTION) ' + res
count += 1
if pred == 'miss':
count_whit += 1
else:
count_wmiss += 1
else:
if pred == 'hit':
count_chit += 1
else:
count_cmiss += 1
print('\ntotal incorrect predictions: {}, ie {}%'.format(count, round( (count*100)/10000 ,2)))
print('correct hits: {}, correct misses: {}, hits classified as misses: {}, misses classified as hits: {}'.format(count_chit, count_cmiss, count_whit, count_wmiss))
f = open('Summary.csv', 'a+')
f.write('{},{},{},{},{},{},{},{},{},{},{},{},{} #{}\n'.format('sigmoid', data_size, batch_size, in_alpha, in_epochs, shape, round(time2-time1, 3), round(time21-time11, 3), count_chit, count_cmiss, count_whit, count_wmiss, round( (count*100)/10000 ,2), comment ) )
f.close()
class Graph:
def __init__(self, n=5, m=[5], p=2):
"""n: input, m:no of nodes in each hidden layer, p: no of output"""
self.placeholders = n
self.hidden_layers = len(m)
self.outputs = p
self.synapses = []
if self.hidden_layers == 0:
self.synapses.append(4*np.random.random((self.placeholders, self.outputs)) -2)
else:
self.synapses.append(4*np.random.random((self.placeholders, m[0])) -2)
previous = m[0]
for layer in m[1:]:
synapse = 4*np.random.random((previous, layer)) -2
previous = layer
self.synapses.append(synapse)
self.synapses.append(4*np.random.random((previous, self.outputs)) -2)
def sigmoid(x, n=100):
return 1/(1+np.exp(-x/n))
def d_sigmoid(x, n=100):
return (x*(1-x))/n
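    # Added note (not in the original): d_sigmoid expects x to be the already
    # activated output sigmoid(z), not the pre-activation z, and both functions
    # share the same temperature n. A quick numeric check:
    #
    #     s = Graph.sigmoid(0.0)  # 0.5
    #     Graph.d_sigmoid(s)      # (0.5 * (1 - 0.5)) / 100 = 0.0025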
def tanH(x):
num = np.exp(x)-np.exp(-x)
den = np.exp(x)+np.exp(-x)
return num/den
def d_tanH(x):
return 4 / (np.exp(2*x) + np.exp(-2*x) + 2)
def relu(x):
return np.maximum(x,0)
def d_relu(x):
def temp(y):
if y > 0:
return 1
else:
return 0
func = np.vectorize(temp)
return func(x)
def train(self, x, y, activation_function=sigmoid, d_activation_function=d_sigmoid, epochs=10, batch=100, alpha=1.0, display=10, dots=False):
dispcount = 0
#epcount = 0
alpha_ = alpha
for epoch in range(epochs):
#dotcount = 0
alpha = alpha-(alpha_/epochs)
#alpha = alpha*(0.99)
#print('alpha = {}'.format(alpha))
for iteration in range(math.ceil(float(len(x))/batch)):
offset = randint(-10,10)
inputs = x[offset+(iteration*batch):offset+((iteration+1)*batch)]
y_ = y[offset+(iteration*batch):offset+((iteration+1)*batch)]
layer = inputs
layers = [layer]
#print(iteration)
for synapse in self.synapses:
#print(type(layer))
#print(type(synapse))
next_layer = activation_function(np.dot(layer, synapse))
layers.append(next_layer)
layer = next_layer
for i, s in reversed(list(enumerate(self.synapses))):
if i == len(self.synapses)-1:
error = (y_ - layers[i+1])#np.square()# self.synapses[len(self.synapses)-i-1]
dispcount += 1
if display != 0 and dispcount % ((epochs * math.ceil(float(len(x))/batch)) / display) == 0:
print('Error as of {}, {} is: '.format(epoch, iteration))
print(error)
else:
error = delta.dot(self.synapses[i+1].T)
delta = error * d_activation_function(layers[i+1])
s += alpha * layers[i].T.dot(delta)
if (dots):# and dotcount % (math.ceil(float(len(x))/batch) / 20) == 0):
print('.', end='')
#dotcount += 1
if (dots):# and epcount % (epochs / 50) == 0):
print('')
#epcount += 1
"""for i, l in reversed(list(enumerate(layers))):
if i == len(layers)-1:
error = y - l
else:
delta.dot(self.synapses[].T)
delta = error*Graph.d_sigmoid(l)
self.synapses[i-1]
"""
"""
for i in range(self.hidden_layers):
layer = Graph.sigmoid(np.dot(layer, self.synapses[i]))
layers.append(layer)
layers.append(Graph.sigmoid(np.dot(layers[-1], self.synapses[-1])))
"""
"""
for i in range(steps):
l0 = x
l1 = Graph.sigmoid(np.dot(l0, self.syn0))
l2 = Graph.sigmoid(np.dot(l1, self.syn1))
l2_error = y-l2
l2_delta = l2_error*Graph.d_sigmoid(l2)
self.syn1 += alpha * l1.T.dot(l2_delta)
l1_error = l2_delta.dot(self.syn1.T)
l1_delta = l1_error*Graph.d_sigmoid(l1)
self.syn0 += alpha * l0.T.dot(l1_delta)
"""
def predict (self, data, func=sigmoid):
inputs = data
layer = inputs
layers = [layer]
for synapse in self.synapses:
next_layer = func(np.dot(layer, synapse))
layers.append(next_layer)
layer = next_layer
return layers[-1]
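# Illustrative usage sketch (added comment; toy data, not the train.csv
# pipeline above): train the network on a tiny synthetic problem.
#
#     g = Graph(n=2, m=[4], p=1)
#     x = np.random.random((100, 2))
#     y = (x.sum(axis=1) > 1).astype(float).reshape(-1, 1)
#     g.train(x, y, epochs=5, batch=10, alpha=0.5, display=0)
#     preds = g.predict(x)  # sigmoid outputs in (0, 1), one per input row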
if __name__ == '__main__':
main()
| mit |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/plotting/_core.py | 1 | 99199 | # being a bit too dynamic
# pylint: disable=E1101
from __future__ import division
import warnings
import re
from collections import namedtuple
from distutils.version import LooseVersion
import numpy as np
from pandas.util._decorators import cache_readonly
from pandas.core.base import PandasObject
from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike
from pandas.core.dtypes.common import (
is_list_like,
is_integer,
is_number,
is_hashable,
is_iterator)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.common import AbstractMethodError, _try_sort, _any_not_none
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.compat import range, lrange, map, zip, string_types
import pandas.compat as compat
from pandas.io.formats.printing import pprint_thing
from pandas.util._decorators import Appender
from pandas.plotting._compat import (_mpl_ge_1_3_1,
_mpl_ge_1_5_0,
_mpl_ge_2_0_0)
from pandas.plotting._style import (plot_params,
_get_standard_colors)
from pandas.plotting._tools import (_subplots, _flatten, table,
_handle_shared_axes, _get_all_lines,
_get_xlim, _set_ticks_props,
format_date_labels)
_registered = False
def _setup():
    # delay the import of matplotlib until necessary
global _registered
if not _registered:
from pandas.plotting import _converter
_converter.register()
_registered = True
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
def _gca(rc=None):
import matplotlib.pyplot as plt
with plt.rc_context(rc):
return plt.gca()
def _gcf():
import matplotlib.pyplot as plt
return plt.gcf()
class MPLPlot(object):
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
data :
"""
@property
def _kind(self):
"""Specify kind str. Must be overridden in child class"""
raise NotImplementedError
_layout_type = 'vertical'
_default_rot = 0
orientation = None
_pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
'mark_right', 'stacked']
_attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
'mark_right': True, 'stacked': False}
def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
sharey=False, use_index=True,
figsize=None, grid=None, legend=True, rot=None,
ax=None, fig=None, title=None, xlim=None, ylim=None,
xticks=None, yticks=None,
sort_columns=False, fontsize=None,
secondary_y=False, colormap=None,
table=False, layout=None, **kwds):
_setup()
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
# if we get an axis, the users should do the visibility
# setting...
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else self.plt.rcParams['axes.grid']
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop('xerr', None)
yerr = kwds.pop('yerr', None)
self.errors = {}
for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):
self.errors[kw] = self._parse_errorbars(kw, err)
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmap` name.
# Probably better to accept either.
if 'cmap' in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif 'cmap' in kwds:
self.colormap = kwds.pop('cmap')
else:
self.colormap = colormap
self.table = table
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
if 'color' not in self.kwds and 'colors' in self.kwds:
warnings.warn(("'colors' is being deprecated. Please use 'color'"
"instead of 'colors'"))
colors = self.kwds.pop('colors')
self.kwds['color'] = colors
if ('color' in self.kwds and self.nseries == 1 and
not is_list_like(self.kwds['color'])):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds and isinstance(self.kwds['color'], tuple) and
self.nseries == 1 and len(self.kwds['color']) in (3, 4)):
# support RGB and RGBA tuples in series plot
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds or 'colors' in self.kwds) and \
self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
if 'color' in self.kwds and self.style is not None:
if is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
if re.match('^[a-z]+?', s) is not None:
raise ValueError(
"Cannot pass 'style' string with a color "
"symbol and 'color' keyword argument. Please"
" use one or the other or pass 'style' "
"without a color symbol")
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
# TODO: unused?
# if self.sort_columns:
# columns = _try_sort(data.columns)
# else:
# columns = data.columns
for col, values in data.iteritems():
if keep_index is True:
yield col, values
else:
yield col, values.values
@property
def nseries(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._adorn_subplots()
for ax in self.axes:
self._post_plot_logic_common(ax, self.data)
self._post_plot_logic(ax, self.data)
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
len(ax.containers) != 0)
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._get_ax_layer(ax)
if hasattr(ax, 'right_ax'):
            # if it has a right_ax property, ``ax`` must be the left axes
            return ax.right_ax
        elif hasattr(ax, 'left_ax'):
            # if it has a left_ax property, ``ax`` must be the right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
# TODO: use Matplotlib public API when available
new_ax._get_lines = orig_ax._get_lines
new_ax._get_patches_for_fill = orig_ax._get_patches_for_fill
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(naxes=self.nseries,
sharex=self.sharex, sharey=self.sharey,
figsize=self.figsize, ax=self.ax,
layout=self.layout,
layout_type=self._layout_type)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
if self.logx or self.loglog:
[a.set_xscale('log') for a in axes]
if self.logy or self.loglog:
[a.set_yscale('log') for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
if (sec_true or all_sec):
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if isinstance(data, ABCSeries):
label = self.label
if label is None and data.name is None:
label = 'None'
data = data.to_frame(name=label)
# GH16953, _convert is needed as fallback, for ``Series``
# with ``dtype == object``
data = data._convert(datetime=True, timedelta=True)
numeric_data = data.select_dtypes(include=[np.number,
"datetime",
"datetimetz",
"timedelta"])
try:
is_empty = numeric_data.empty
except AttributeError:
is_empty = not len(numeric_data)
# no empty frames or series allowed
if is_empty:
raise TypeError('Empty {0!r}: no numeric data to '
'plot'.format(numeric_data.__class__.__name__))
self.data = numeric_data
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._get_ax(0)
table(ax, data)
def _post_plot_logic_common(self, ax, data):
"""Common post process for each axes"""
labels = [pprint_thing(key) for key in data.index]
labels = dict(zip(range(len(data.index)), labels))
if self.orientation == 'vertical' or self.orientation is None:
if self._need_to_set_index:
xticklabels = [labels.get(x, '') for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels)
self._apply_axis_properties(ax.xaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
if hasattr(ax, 'right_ax'):
self._apply_axis_properties(ax.right_ax.yaxis,
fontsize=self.fontsize)
elif self.orientation == 'horizontal':
if self._need_to_set_index:
yticklabels = [labels.get(y, '') for y in ax.get_yticks()]
ax.set_yticklabels(yticklabels)
self._apply_axis_properties(ax.yaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
if hasattr(ax, 'right_ax'):
self._apply_axis_properties(ax.right_ax.yaxis,
fontsize=self.fontsize)
else: # pragma no cover
raise ValueError
def _post_plot_logic(self, ax, data):
"""Post process for each axes. Overridden in child classes"""
pass
def _adorn_subplots(self):
"""Common post process unrelated to data"""
if len(self.axes) > 0:
all_axes = self._get_subplots()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
naxes=nrows * ncols, nrows=nrows,
ncols=ncols, sharex=self.sharex,
sharey=self.sharey)
for ax in self.axes:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
if is_list_like(self.title):
if len(self.title) != self.nseries:
msg = ('The length of `title` must equal the number '
'of columns if using `title` of type `list` '
'and `subplots=True`.\n'
'length of title = {}\n'
'number of columns = {}').format(
len(self.title), self.nseries)
raise ValueError(msg)
for (ax, title) in zip(self.axes, self.title):
ax.set_title(title)
else:
self.fig.suptitle(self.title)
else:
if is_list_like(self.title):
msg = ('Using `title` of type `list` is not supported '
'unless `subplots=True` is passed')
raise ValueError(msg)
self.axes[0].set_title(self.title)
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self):
if not isinstance(self.data.columns, MultiIndex):
name = self.data.columns.name
if name is not None:
name = pprint_thing(name)
return name
else:
stringified = map(pprint_thing,
self.data.columns.names)
return ','.join(stringified)
def _add_legend_handle(self, handle, label, index=None):
if label is not None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + ' (right)'
self.legend_handles.append(handle)
self.legend_labels.append(label)
def _make_legend(self):
ax, leg = self._get_ax_legend(self.axes[0])
handles = []
labels = []
title = ''
if not self.subplots:
if leg is not None:
title = leg.get_title().get_text()
handles = leg.legendHandles
labels = [x.get_text() for x in leg.get_texts()]
if self.legend:
if self.legend == 'reverse':
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
if self.legend_title is not None:
title = self.legend_title
if len(handles) > 0:
ax.legend(handles, labels, loc='best', title=title)
elif self.subplots and self.legend:
for ax in self.axes:
if ax.get_visible():
ax.legend(loc='best')
def _get_ax_legend(self, ax):
leg = ax.get_legend()
other_ax = (getattr(ax, 'left_ax', None) or
getattr(ax, 'right_ax', None))
other_leg = None
if other_ax is not None:
other_leg = other_ax.get_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
@staticmethod
def mpl_ge_1_3_1():
return _mpl_ge_1_3_1()
@staticmethod
def mpl_ge_1_5_0():
return _mpl_ge_1_5_0()
_need_to_set_index = False
def _get_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ('datetime', 'date',
'datetime64', 'time')
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
self.data = self.data.reindex(index=index.sort_values())
x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
                xaxis values. Taking the LBYL approach here: by the time
                matplotlib raises an exception for non numeric/datetime
                values on the xaxis, several actions have already been taken by plt.
"""
x = index._mpl_repr()
elif is_datetype:
self.data = self.data[notna(self.data.index)]
self.data = self.data.sort_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = lrange(len(index))
else:
x = lrange(len(index))
return x
@classmethod
def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):
mask = isna(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if isinstance(x, Index):
x = x._mpl_repr()
if is_errorbar:
if 'xerr' in kwds:
kwds['xerr'] = np.array(kwds.get('xerr'))
if 'yerr' in kwds:
kwds['yerr'] = np.array(kwds.get('yerr'))
return ax.errorbar(x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is
# unsupported
if style is not None:
args = (x, y, style)
else:
args = (x, y)
return ax.plot(*args, **kwds)
def _get_index_name(self):
if isinstance(self.data.index, MultiIndex):
name = self.data.index.names
if _any_not_none(*name):
name = ','.join([pprint_thing(x) for x in name])
else:
name = None
else:
name = self.data.index.name
if name is not None:
name = pprint_thing(name)
return name
@classmethod
def _get_ax_layer(cls, ax, primary=True):
"""get left (primary) or right (secondary) axes"""
if primary:
return getattr(ax, 'left_ax', ax)
else:
return getattr(ax, 'right_ax', ax)
def _get_ax(self, i):
# get the twinx ax if appropriate
if self.subplots:
ax = self.axes[i]
ax = self._maybe_right_yaxis(ax, i)
self.axes[i] = ax
else:
ax = self.axes[0]
ax = self._maybe_right_yaxis(ax, i)
ax.get_yaxis().set_visible(True)
return ax
def on_right(self, i):
if isinstance(self.secondary_y, bool):
return self.secondary_y
if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)):
return self.data.columns[i] in self.secondary_y
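    # Illustrative usage sketch (added comment): ``secondary_y`` accepts a
    # bool or a collection of column labels, e.g.
    #
    #     df.plot(secondary_y=True)    # plot every series on the right axis
    #     df.plot(secondary_y=['B'])   # only column 'B' on the right axis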
def _apply_style_colors(self, colors, kwds, col_num, label):
"""
Manage style and color based on column number and its label.
Returns tuple of appropriate style and kwds which "color" may be added.
"""
style = None
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[col_num]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(label, style)
else:
style = self.style
has_color = 'color' in kwds or self.colormap is not None
nocolor_style = style is None or re.match('[a-z]+', style) is None
if (has_color or self.subplots) and nocolor_style:
kwds['color'] = colors[col_num % len(colors)]
return style, kwds
def _get_colors(self, num_colors=None, color_kwds='color'):
if num_colors is None:
num_colors = self.nseries
return _get_standard_colors(num_colors=num_colors,
colormap=self.colormap,
color=self.kwds.get(color_kwds))
def _parse_errorbars(self, label, err):
"""
Look for error keyword arguments and return the actual errorbar data
or return the error DataFrame/dict
Error bars can be specified in several ways:
Series: the user provides a pandas.Series object of the same
length as the data
ndarray: provides a np.ndarray of the same length as the data
DataFrame/dict: error values are paired with keys matching the
key in the plotted DataFrame
str: the name of the column within the plotted DataFrame
"""
if err is None:
return None
from pandas import DataFrame, Series
def match_labels(data, e):
e = e.reindex(data.index)
return e
# key-matched DataFrame
if isinstance(err, DataFrame):
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
pass
# Series of error values
elif isinstance(err, Series):
# broadcast error series across data
err = match_labels(self.data, err)
err = np.atleast_2d(err)
err = np.tile(err, (self.nseries, 1))
# errors are a column in the dataframe
elif isinstance(err, string_types):
evalues = self.data[err].values
self.data = self.data[self.data.columns.drop(err)]
err = np.atleast_2d(evalues)
err = np.tile(err, (self.nseries, 1))
elif is_list_like(err):
if is_iterator(err):
err = np.atleast_2d(list(err))
else:
# raw error values
err = np.atleast_2d(err)
err_shape = err.shape
# asymmetrical error bars
if err.ndim == 3:
if (err_shape[0] != self.nseries) or \
(err_shape[1] != 2) or \
(err_shape[2] != len(self.data)):
msg = "Asymmetrical error bars should be provided " + \
"with the shape (%u, 2, %u)" % \
(self.nseries, len(self.data))
raise ValueError(msg)
# broadcast errors to each data series
if len(err) == 1:
err = np.tile(err, (self.nseries, 1))
elif is_number(err):
err = np.tile([err], (self.nseries, len(self.data)))
else:
msg = "No valid %s detected" % label
raise ValueError(msg)
return err
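    # Illustrative usage sketch (added comment; ``df`` is a hypothetical
    # DataFrame with columns 'a' and 'err'), matching the forms documented
    # above:
    #
    #     df.plot(y='a', yerr=df['err'])  # Series, matched on index
    #     df.plot(y='a', yerr='err')      # name of a column in df
    #     df.plot(y='a', yerr=0.1)        # scalar, broadcast to all points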
def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
from pandas import DataFrame
errors = {}
for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):
if flag:
err = self.errors[kw]
# user provided label-matched dataframe of errors
if isinstance(err, (DataFrame, dict)):
if label is not None and label in err.keys():
err = err[label]
else:
err = None
elif index is not None and err is not None:
err = err[index]
if err is not None:
errors[kw] = err
return errors
def _get_subplots(self):
from matplotlib.axes import Subplot
return [ax for ax in self.axes[0].get_figure().get_axes()
if isinstance(ax, Subplot)]
def _get_axes_layout(self):
axes = self._get_subplots()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
class PlanePlot(MPLPlot):
"""
Abstract class for plotting on plane, currently scatter and hexbin.
"""
_layout_type = 'single'
def __init__(self, data, x, y, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError(self._kind + ' requires and x and y column')
if is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if len(self.data[x]._get_numeric_data()) == 0:
raise ValueError(self._kind + ' requires x column to be numeric')
if len(self.data[y]._get_numeric_data()) == 0:
raise ValueError(self._kind + ' requires y column to be numeric')
self.x = x
self.y = y
@property
def nseries(self):
return 1
def _post_plot_logic(self, ax, data):
x, y = self.x, self.y
ax.set_ylabel(pprint_thing(y))
ax.set_xlabel(pprint_thing(x))
class ScatterPlot(PlanePlot):
_kind = 'scatter'
def __init__(self, data, x, y, s=None, c=None, **kwargs):
if s is None:
# hide the matplotlib default for size, in case we want to change
# the handling of this argument later
s = 20
super(ScatterPlot, self).__init__(data, x, y, s=s, **kwargs)
if is_integer(c) and not self.data.columns.holds_integer():
c = self.data.columns[c]
self.c = c
def _make_plot(self):
x, y, c, data = self.x, self.y, self.c, self.data
ax = self.axes[0]
c_is_column = is_hashable(c) and c in self.data.columns
# plot a colorbar only if a colormap is provided or necessary
cb = self.kwds.pop('colorbar', self.colormap or c_is_column)
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'Greys'
cmap = self.plt.cm.get_cmap(cmap)
color = self.kwds.pop("color", None)
if c is not None and color is not None:
raise TypeError('Specify exactly one of `c` and `color`')
elif c is None and color is None:
c_values = self.plt.rcParams['patch.facecolor']
elif color is not None:
c_values = color
elif c_is_column:
c_values = self.data[c].values
else:
c_values = c
if self.legend and hasattr(self, 'label'):
label = self.label
else:
label = None
scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
label=label, cmap=cmap, **self.kwds)
if cb:
img = ax.collections[0]
kws = dict(ax=ax)
if self.mpl_ge_1_3_1():
kws['label'] = c if c_is_column else ''
self.fig.colorbar(img, **kws)
if label is not None:
self._add_legend_handle(scatter, label)
else:
self.legend = False
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
err_kwds['ecolor'] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values,
linestyle='none', **err_kwds)
class HexBinPlot(PlanePlot):
_kind = 'hexbin'
def __init__(self, data, x, y, C=None, **kwargs):
super(HexBinPlot, self).__init__(data, x, y, **kwargs)
if is_integer(C) and not self.data.columns.holds_integer():
C = self.data.columns[C]
self.C = C
def _make_plot(self):
x, y, data, C = self.x, self.y, self.data, self.C
ax = self.axes[0]
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'BuGn'
cmap = self.plt.cm.get_cmap(cmap)
cb = self.kwds.pop('colorbar', True)
if C is None:
c_values = None
else:
c_values = data[C].values
ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap,
**self.kwds)
if cb:
img = ax.collections[0]
self.fig.colorbar(img, ax=ax)
def _make_legend(self):
pass
class LinePlot(MPLPlot):
_kind = 'line'
_default_rot = 0
orientation = 'vertical'
def __init__(self, data, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if self.stacked:
self.data = self.data.fillna(value=0)
self.x_compat = plot_params['x_compat']
if 'x_compat' in self.kwds:
self.x_compat = bool(self.kwds.pop('x_compat'))
def _is_ts_plot(self):
# this is slightly deceptive
return not self.x_compat and self.use_index and self._use_dynamic_x()
def _use_dynamic_x(self):
from pandas.plotting._timeseries import _use_dynamic_x
return _use_dynamic_x(self._get_ax(0), self.data)
def _make_plot(self):
if self._is_ts_plot():
from pandas.plotting._timeseries import _maybe_convert_index
data = _maybe_convert_index(self._get_ax(0), self.data)
x = data.index # dummy, not used
plotf = self._ts_plot
it = self._iter_data(data=data, keep_index=True)
else:
x = self._get_xticks(convert_period=True)
plotf = self._plot
it = self._iter_data()
stacking_id = self._get_stacking_id()
is_errorbar = _any_not_none(*self.errors.values())
colors = self._get_colors()
for i, (label, y) in enumerate(it):
ax = self._get_ax(i)
kwds = self.kwds.copy()
style, kwds = self._apply_style_colors(colors, kwds, i, label)
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = pprint_thing(label) # .encode('utf-8')
kwds['label'] = label
newlines = plotf(ax, x, y, style=style, column_num=i,
stacking_id=stacking_id,
is_errorbar=is_errorbar,
**kwds)
self._add_legend_handle(newlines[0], label, index=i)
if not _mpl_ge_2_0_0():
lines = _get_all_lines(ax)
left, right = _get_xlim(lines)
ax.set_xlim(left, right)
@classmethod
def _plot(cls, ax, x, y, style=None, column_num=None,
stacking_id=None, **kwds):
        # column_num is used to get the target column from plotf in line and
        # area plots
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(y))
y_values = cls._get_stacked_values(ax, stacking_id, y, kwds['label'])
lines = MPLPlot._plot(ax, x, y_values, style=style, **kwds)
cls._update_stacker(ax, stacking_id, y)
return lines
@classmethod
def _ts_plot(cls, ax, x, data, style=None, **kwds):
from pandas.plotting._timeseries import (_maybe_resample,
_decorate_axes,
format_dateaxis)
# accept x to be consistent with normal plot func,
# x is not passed to tsplot as it uses data.index as x coordinate
# column_num must be in kwds for stacking purpose
freq, data = _maybe_resample(data, ax, kwds)
# Set ax with freq info
_decorate_axes(ax, freq, kwds)
# digging deeper
if hasattr(ax, 'left_ax'):
_decorate_axes(ax.left_ax, freq, kwds)
if hasattr(ax, 'right_ax'):
_decorate_axes(ax.right_ax, freq, kwds)
ax._plot_data.append((data, cls._kind, kwds))
lines = cls._plot(ax, data.index, data.values, style=style, **kwds)
# set date formatter, locators and rescale limits
format_dateaxis(ax, ax.freq, data.index)
return lines
def _get_stacking_id(self):
if self.stacked:
return id(self.data)
else:
return None
@classmethod
def _initialize_stacker(cls, ax, stacking_id, n):
if stacking_id is None:
return
if not hasattr(ax, '_stacker_pos_prior'):
ax._stacker_pos_prior = {}
if not hasattr(ax, '_stacker_neg_prior'):
ax._stacker_neg_prior = {}
ax._stacker_pos_prior[stacking_id] = np.zeros(n)
ax._stacker_neg_prior[stacking_id] = np.zeros(n)
@classmethod
def _get_stacked_values(cls, ax, stacking_id, values, label):
if stacking_id is None:
return values
if not hasattr(ax, '_stacker_pos_prior'):
# stacker may not be initialized for subplots
cls._initialize_stacker(ax, stacking_id, len(values))
if (values >= 0).all():
return ax._stacker_pos_prior[stacking_id] + values
elif (values <= 0).all():
return ax._stacker_neg_prior[stacking_id] + values
raise ValueError('When stacked is True, each column must be either '
'all positive or negative.'
'{0} contains both positive and negative values'
.format(label))
@classmethod
def _update_stacker(cls, ax, stacking_id, values):
if stacking_id is None:
return
if (values >= 0).all():
ax._stacker_pos_prior[stacking_id] += values
elif (values <= 0).all():
ax._stacker_neg_prior[stacking_id] += values
def _post_plot_logic(self, ax, data):
condition = (not self._use_dynamic_x() and
data.index.is_all_dates and
not self.subplots or
(self.subplots and self.sharex))
index_name = self._get_index_name()
if condition:
# irregular TS rotated 30 deg. by default
# probably a better place to check / set this.
if not self._rot_set:
self.rot = 30
format_date_labels(ax, rot=self.rot)
if index_name is not None and self.use_index:
ax.set_xlabel(index_name)
class AreaPlot(LinePlot):
_kind = 'area'
def __init__(self, data, **kwargs):
kwargs.setdefault('stacked', True)
data = data.fillna(value=0)
LinePlot.__init__(self, data, **kwargs)
if not self.stacked:
# use smaller alpha to distinguish overlap
self.kwds.setdefault('alpha', 0.5)
if self.logy or self.loglog:
raise ValueError("Log-y scales are not supported in area plot")
@classmethod
def _plot(cls, ax, x, y, style=None, column_num=None,
stacking_id=None, is_errorbar=False, **kwds):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(y))
y_values = cls._get_stacked_values(ax, stacking_id, y, kwds['label'])
# need to remove label, because subplots uses mpl legend as it is
line_kwds = kwds.copy()
if cls.mpl_ge_1_5_0():
line_kwds.pop('label')
lines = MPLPlot._plot(ax, x, y_values, style=style, **line_kwds)
# get data from the line to get coordinates for fill_between
xdata, y_values = lines[0].get_data(orig=False)
# unable to use ``_get_stacked_values`` here to get starting point
if stacking_id is None:
start = np.zeros(len(y))
elif (y >= 0).all():
start = ax._stacker_pos_prior[stacking_id]
elif (y <= 0).all():
start = ax._stacker_neg_prior[stacking_id]
else:
start = np.zeros(len(y))
if 'color' not in kwds:
kwds['color'] = lines[0].get_color()
rect = ax.fill_between(xdata, start, y_values, **kwds)
cls._update_stacker(ax, stacking_id, y)
# LinePlot expects list of artists
res = [rect] if cls.mpl_ge_1_5_0() else lines
return res
def _add_legend_handle(self, handle, label, index=None):
if not self.mpl_ge_1_5_0():
from matplotlib.patches import Rectangle
# Because fill_between isn't supported in legend,
# specifically add Rectangle handle here
alpha = self.kwds.get('alpha', None)
handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(),
alpha=alpha)
LinePlot._add_legend_handle(self, handle, label, index=index)
def _post_plot_logic(self, ax, data):
LinePlot._post_plot_logic(self, ax, data)
if self.ylim is None:
if (data >= 0).all().all():
ax.set_ylim(0, None)
elif (data <= 0).all().all():
ax.set_ylim(None, 0)
class BarPlot(MPLPlot):
_kind = 'bar'
_default_rot = 90
orientation = 'vertical'
def __init__(self, data, **kwargs):
# we have to treat a series differently than a
# 1-column DataFrame w.r.t. color handling
self._is_series = isinstance(data, ABCSeries)
self.bar_width = kwargs.pop('width', 0.5)
pos = kwargs.pop('position', 0.5)
kwargs.setdefault('align', 'center')
self.tick_pos = np.arange(len(data))
self.bottom = kwargs.pop('bottom', 0)
self.left = kwargs.pop('left', 0)
self.log = kwargs.pop('log', False)
MPLPlot.__init__(self, data, **kwargs)
if self.stacked or self.subplots:
self.tickoffset = self.bar_width * pos
if kwargs['align'] == 'edge':
self.lim_offset = self.bar_width / 2
else:
self.lim_offset = 0
else:
if kwargs['align'] == 'edge':
w = self.bar_width / self.nseries
self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
self.lim_offset = w * 0.5
else:
self.tickoffset = self.bar_width * pos
self.lim_offset = 0
self.ax_pos = self.tick_pos - self.tickoffset
def _args_adjust(self):
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
if is_list_like(self.left):
self.left = np.array(self.left)
@classmethod
def _plot(cls, ax, x, y, w, start=0, log=False, **kwds):
return ax.bar(x, y, w, bottom=start, log=log, **kwds)
@property
def _start_base(self):
return self.bottom
def _make_plot(self):
import matplotlib as mpl
colors = self._get_colors()
ncolors = len(colors)
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data(fillna=0)):
ax = self._get_ax(i)
kwds = self.kwds.copy()
if self._is_series:
kwds['color'] = colors
else:
kwds['color'] = colors[i % ncolors]
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = pprint_thing(label)
if (('yerr' in kwds) or ('xerr' in kwds)) \
and (kwds.get('ecolor') is None):
kwds['ecolor'] = mpl.rcParams['xtick.color']
start = 0
if self.log and (y >= 1).all():
start = 1
start = start + self._start_base
if self.subplots:
w = self.bar_width / 2
rect = self._plot(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label,
log=self.log, **kwds)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior) + self._start_base
w = self.bar_width / 2
rect = self._plot(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label,
log=self.log, **kwds)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
w = self.bar_width / K
rect = self._plot(ax, self.ax_pos + (i + 0.5) * w, y, w,
start=start, label=label,
log=self.log, **kwds)
self._add_legend_handle(rect, label, index=i)
def _post_plot_logic(self, ax, data):
if self.use_index:
str_index = [pprint_thing(key) for key in data.index]
else:
str_index = [pprint_thing(key) for key in range(data.shape[0])]
name = self._get_index_name()
s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
self._decorate_ticks(ax, name, str_index, s_edge, e_edge)
def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):
ax.set_xlim((start_edge, end_edge))
ax.set_xticks(self.tick_pos)
ax.set_xticklabels(ticklabels)
if name is not None and self.use_index:
ax.set_xlabel(name)
class BarhPlot(BarPlot):
_kind = 'barh'
_default_rot = 0
orientation = 'horizontal'
@property
def _start_base(self):
return self.left
@classmethod
def _plot(cls, ax, x, y, w, start=0, log=False, **kwds):
return ax.barh(x, y, w, left=start, log=log, **kwds)
def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):
# horizontal bars
ax.set_ylim((start_edge, end_edge))
ax.set_yticks(self.tick_pos)
ax.set_yticklabels(ticklabels)
if name is not None and self.use_index:
ax.set_ylabel(name)
class HistPlot(LinePlot):
_kind = 'hist'
def __init__(self, data, bins=10, bottom=0, **kwargs):
self.bins = bins # use mpl default
self.bottom = bottom
# Do not call LinePlot.__init__ which may fill nan
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if is_integer(self.bins):
# create common bin edge
values = (self.data._convert(datetime=True)._get_numeric_data())
values = np.ravel(values)
values = values[~isna(values)]
hist, self.bins = np.histogram(
values, bins=self.bins,
range=self.kwds.get('range', None),
weights=self.kwds.get('weights', None))
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
@classmethod
def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0,
stacking_id=None, **kwds):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
y = y[~isna(y)]
base = np.zeros(len(bins) - 1)
bottom = bottom + \
cls._get_stacked_values(ax, stacking_id, base, kwds['label'])
# ignore style
n, bins, patches = ax.hist(y, bins=bins, bottom=bottom, **kwds)
cls._update_stacker(ax, stacking_id, n)
return patches
def _make_plot(self):
colors = self._get_colors()
stacking_id = self._get_stacking_id()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label)
kwds['label'] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds['style'] = style
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i,
stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _make_plot_keywords(self, kwds, y):
"""merge BoxPlot/KdePlot properties to passed kwds"""
# y is required for KdePlot
kwds['bottom'] = self.bottom
kwds['bins'] = self.bins
return kwds
def _post_plot_logic(self, ax, data):
if self.orientation == 'horizontal':
ax.set_xlabel('Frequency')
else:
ax.set_ylabel('Frequency')
@property
def orientation(self):
if self.kwds.get('orientation', None) == 'horizontal':
return 'horizontal'
else:
return 'vertical'
class KdePlot(HistPlot):
_kind = 'kde'
orientation = 'vertical'
def __init__(self, data, bw_method=None, ind=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
self.bw_method = bw_method
self.ind = ind
def _args_adjust(self):
pass
def _get_ind(self, y):
if self.ind is None:
            # np.nanmax() and np.nanmin() ignore the missing values
sample_range = np.nanmax(y) - np.nanmin(y)
ind = np.linspace(np.nanmin(y) - 0.5 * sample_range,
np.nanmax(y) + 0.5 * sample_range, 1000)
else:
ind = self.ind
return ind
@classmethod
def _plot(cls, ax, y, style=None, bw_method=None, ind=None,
column_num=None, stacking_id=None, **kwds):
from scipy.stats import gaussian_kde
from scipy import __version__ as spv
y = remove_na_arraylike(y)
if LooseVersion(spv) >= '0.11.0':
gkde = gaussian_kde(y, bw_method=bw_method)
else:
gkde = gaussian_kde(y)
if bw_method is not None:
msg = ('bw_method was added in Scipy 0.11.0.' +
' Scipy version in use is %s.' % spv)
warnings.warn(msg)
y = gkde.evaluate(ind)
lines = MPLPlot._plot(ax, ind, y, style=style, **kwds)
return lines
def _make_plot_keywords(self, kwds, y):
kwds['bw_method'] = self.bw_method
kwds['ind'] = self._get_ind(y)
return kwds
def _post_plot_logic(self, ax, data):
ax.set_ylabel('Density')
class PiePlot(MPLPlot):
_kind = 'pie'
_layout_type = 'horizontal'
def __init__(self, data, kind=None, **kwargs):
data = data.fillna(value=0)
if (data < 0).any().any():
raise ValueError("{0} doesn't allow negative values".format(kind))
MPLPlot.__init__(self, data, kind=kind, **kwargs)
def _args_adjust(self):
self.grid = False
self.logy = False
self.logx = False
self.loglog = False
def _validate_color_args(self):
pass
def _make_plot(self):
colors = self._get_colors(
num_colors=len(self.data), color_kwds='colors')
self.kwds.setdefault('colors', colors)
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
if label is not None:
label = pprint_thing(label)
ax.set_ylabel(label)
kwds = self.kwds.copy()
def blank_labeler(label, value):
if value == 0:
return ''
else:
return label
idx = [pprint_thing(v) for v in self.data.index]
labels = kwds.pop('labels', idx)
# labels is used for each wedge's labels
# Blank out labels for values of 0 so they don't overlap
# with nonzero wedges
if labels is not None:
blabels = [blank_labeler(l, value) for
l, value in zip(labels, y)]
else:
blabels = None
results = ax.pie(y, labels=blabels, **kwds)
if kwds.get('autopct', None) is not None:
patches, texts, autotexts = results
else:
patches, texts = results
autotexts = []
if self.fontsize is not None:
for t in texts + autotexts:
t.set_fontsize(self.fontsize)
# leglabels is used for legend labels
leglabels = labels if labels is not None else idx
for p, l in zip(patches, leglabels):
self._add_legend_handle(p, l)
class BoxPlot(LinePlot):
_kind = 'box'
_layout_type = 'horizontal'
_valid_return_types = (None, 'axes', 'dict', 'both')
# namedtuple to hold results
BP = namedtuple("Boxplot", ['ax', 'lines'])
def __init__(self, data, return_type='axes', **kwargs):
# Do not call LinePlot.__init__ which may fill nan
if return_type not in self._valid_return_types:
raise ValueError(
"return_type must be {None, 'axes', 'dict', 'both'}")
self.return_type = return_type
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if self.subplots:
            # Disable label axis sharing. Otherwise, all subplots show the
            # last column label
if self.orientation == 'vertical':
self.sharex = False
else:
self.sharey = False
@classmethod
def _plot(cls, ax, y, column_num=None, return_type='axes', **kwds):
if y.ndim == 2:
y = [remove_na_arraylike(v) for v in y]
            # Boxplot fails with empty arrays, so we need to add a NaN
            # if any columns are empty
# GH 8181
y = [v if v.size > 0 else np.array([np.nan]) for v in y]
else:
y = remove_na_arraylike(y)
bp = ax.boxplot(y, **kwds)
if return_type == 'dict':
return bp, bp
elif return_type == 'both':
return cls.BP(ax=ax, lines=bp), bp
else:
return ax, bp
def _validate_color_args(self):
if 'color' in self.kwds:
if self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
self.color = self.kwds.pop('color')
if isinstance(self.color, dict):
valid_keys = ['boxes', 'whiskers', 'medians', 'caps']
for key, values in compat.iteritems(self.color):
if key not in valid_keys:
raise ValueError("color dict contains invalid "
"key '{0}' "
"The key must be either {1}"
.format(key, valid_keys))
else:
self.color = None
# get standard colors for default
colors = _get_standard_colors(num_colors=3,
colormap=self.colormap,
color=None)
            # use 2 colors by default, for the box/whiskers and the median;
            # flier colors aren't needed here
            # because they can be specified by the ``sym`` kw
self._boxes_c = colors[0]
self._whiskers_c = colors[0]
self._medians_c = colors[2]
self._caps_c = 'k' # mpl default
def _get_colors(self, num_colors=None, color_kwds='color'):
pass
def maybe_color_bp(self, bp):
if isinstance(self.color, dict):
boxes = self.color.get('boxes', self._boxes_c)
whiskers = self.color.get('whiskers', self._whiskers_c)
medians = self.color.get('medians', self._medians_c)
caps = self.color.get('caps', self._caps_c)
else:
# Other types are forwarded to matplotlib
# If None, use default colors
boxes = self.color or self._boxes_c
whiskers = self.color or self._whiskers_c
medians = self.color or self._medians_c
caps = self.color or self._caps_c
from matplotlib.artist import setp
setp(bp['boxes'], color=boxes, alpha=1)
setp(bp['whiskers'], color=whiskers, alpha=1)
setp(bp['medians'], color=medians, alpha=1)
setp(bp['caps'], color=caps, alpha=1)
def _make_plot(self):
if self.subplots:
from pandas.core.series import Series
self._return_obj = Series()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
kwds = self.kwds.copy()
ret, bp = self._plot(ax, y, column_num=i,
return_type=self.return_type, **kwds)
self.maybe_color_bp(bp)
self._return_obj[label] = ret
label = [pprint_thing(label)]
self._set_ticklabels(ax, label)
else:
y = self.data.values.T
ax = self._get_ax(0)
kwds = self.kwds.copy()
ret, bp = self._plot(ax, y, column_num=0,
return_type=self.return_type, **kwds)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [l for l, _ in self._iter_data()]
labels = [pprint_thing(l) for l in labels]
if not self.use_index:
labels = [pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
def _set_ticklabels(self, ax, labels):
if self.orientation == 'vertical':
ax.set_xticklabels(labels)
else:
ax.set_yticklabels(labels)
def _make_legend(self):
pass
def _post_plot_logic(self, ax, data):
pass
@property
def orientation(self):
if self.kwds.get('vert', True):
return 'vertical'
else:
return 'horizontal'
@property
def result(self):
if self.return_type is None:
return super(BoxPlot, self).result
else:
return self._return_obj
# kinds supported by both dataframe and series
_common_kinds = ['line', 'bar', 'barh',
'kde', 'density', 'area', 'hist', 'box']
# kinds supported by dataframe
_dataframe_kinds = ['scatter', 'hexbin']
# kinds supported only by series or dataframe single column
_series_kinds = ['pie']
_all_kinds = _common_kinds + _dataframe_kinds + _series_kinds
_klasses = [LinePlot, BarPlot, BarhPlot, KdePlot, HistPlot, BoxPlot,
ScatterPlot, HexBinPlot, AreaPlot, PiePlot]
_plot_klass = {}
for klass in _klasses:
_plot_klass[klass._kind] = klass
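# Hedged illustration (added; not part of the original module) of how the
# registry above dispatches: _plot_klass['kde'] is KdePlot, so a call such as
# df.plot(kind='kde') is routed through _plot() below to KdePlot(data, ...).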
def _plot(data, x=None, y=None, subplots=False,
ax=None, kind='line', **kwds):
kind = _get_standard_kind(kind.lower().strip())
if kind in _all_kinds:
klass = _plot_klass[kind]
else:
raise ValueError("%r is not a valid plot kind" % kind)
from pandas import DataFrame
if kind in _dataframe_kinds:
if isinstance(data, DataFrame):
plot_obj = klass(data, x=x, y=y, subplots=subplots, ax=ax,
kind=kind, **kwds)
else:
raise ValueError("plot kind %r can only be used for data frames"
% kind)
elif kind in _series_kinds:
if isinstance(data, DataFrame):
if y is None and subplots is False:
msg = "{0} requires either y column or 'subplots=True'"
raise ValueError(msg.format(kind))
elif y is not None:
if is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
                # convert to a Series; copy so we don't modify the original
data = data[y].copy()
data.index.name = y
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
else:
if isinstance(data, DataFrame):
if x is not None:
if is_integer(x) and not data.columns.holds_integer():
x = data.columns[x]
data = data.set_index(x)
if y is not None:
if is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
label = kwds['label'] if 'label' in kwds else y
series = data[y].copy() # Don't modify
series.name = label
for kw in ['xerr', 'yerr']:
if (kw in kwds) and \
(isinstance(kwds[kw], string_types) or
is_integer(kwds[kw])):
try:
kwds[kw] = data[kwds[kw]]
except (IndexError, KeyError, TypeError):
pass
data = series
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
plot_obj.generate()
plot_obj.draw()
return plot_obj.result
df_kind = """- 'scatter' : scatter plot
- 'hexbin' : hexbin plot"""
series_kind = ""
df_coord = """x : label or position, default None
y : label or position, default None
Allows plotting of one column versus another"""
series_coord = ""
df_unique = """stacked : boolean, default False in line and
bar plots, and True in area plot. If True, create stacked plot.
sort_columns : boolean, default False
Sort column names to determine plot ordering
secondary_y : boolean or sequence, default False
Whether to plot on the secondary y-axis
If a list/tuple, which columns to plot on secondary y-axis"""
series_unique = """label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right"""
df_ax = """ax : matplotlib axes object, default None
subplots : boolean, default False
Make separate subplots for each column
sharex : boolean, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
is passed in; Be aware, that passing in both an ax and sharex=True
will alter all x axis labels for all axis in a figure!
sharey : boolean, default False
In case subplots=True, share y axis and set some y axis labels to
invisible
layout : tuple (optional)
(rows, columns) for the layout of subplots"""
series_ax = """ax : matplotlib axes object
If not passed, uses gca()"""
df_note = """- If `kind` = 'scatter' and the argument `c` is the name of a dataframe
column, the values of that column are used to color each point.
- If `kind` = 'hexbin', you can control the size of the bins with the
`gridsize` argument. By default, a histogram of the counts around each
`(x, y)` point is computed. You can specify alternative aggregations
by passing values to the `C` and `reduce_C_function` arguments.
`C` specifies the value at each `(x, y)` point and `reduce_C_function`
is a function of one argument that reduces all the values in a bin to
a single number (e.g. `mean`, `max`, `sum`, `std`)."""
series_note = ""
_shared_doc_df_kwargs = dict(klass='DataFrame', klass_obj='df',
klass_kind=df_kind, klass_coord=df_coord,
klass_ax=df_ax, klass_unique=df_unique,
klass_note=df_note)
_shared_doc_series_kwargs = dict(klass='Series', klass_obj='s',
klass_kind=series_kind,
klass_coord=series_coord, klass_ax=series_ax,
klass_unique=series_unique,
klass_note=series_note)
_shared_docs['plot'] = """
Make plots of %(klass)s using matplotlib / pylab.
*New in version 0.17.0:* Each plot kind has a corresponding method on the
``%(klass)s.plot`` accessor:
``%(klass_obj)s.plot(kind='line')`` is equivalent to
``%(klass_obj)s.plot.line()``.
Parameters
----------
data : %(klass)s
%(klass_coord)s
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
%(klass_kind)s
%(klass_ax)s
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
        If True, draw a table using the data in the DataFrame; the data will
        be transposed to match matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
    yerr : DataFrame, Series, array-like, dict or str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
%(klass_unique)s
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
%(klass_note)s
"""
@Appender(_shared_docs['plot'] % _shared_doc_df_kwargs)
def plot_frame(data, x=None, y=None, kind='line', ax=None,
subplots=False, sharex=None, sharey=False, layout=None,
figsize=None, use_index=True, title=None, grid=None,
legend=True, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
secondary_y=False, sort_columns=False,
**kwds):
return _plot(data, kind=kind, x=x, y=y, ax=ax,
subplots=subplots, sharex=sharex, sharey=sharey,
layout=layout, figsize=figsize, use_index=use_index,
title=title, grid=grid, legend=legend,
style=style, logx=logx, logy=logy, loglog=loglog,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
rot=rot, fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr,
secondary_y=secondary_y, sort_columns=sort_columns,
**kwds)
@Appender(_shared_docs['plot'] % _shared_doc_series_kwargs)
def plot_series(data, kind='line', ax=None, # Series unique
figsize=None, use_index=True, title=None, grid=None,
legend=False, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
label=None, secondary_y=False, # Series unique
**kwds):
import matplotlib.pyplot as plt
if ax is None and len(plt.get_fignums()) > 0:
ax = _gca()
ax = MPLPlot._get_ax_layer(ax)
return _plot(data, kind=kind, ax=ax,
figsize=figsize, use_index=use_index, title=title,
grid=grid, legend=legend,
style=style, logx=logx, logy=logy, loglog=loglog,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
rot=rot, fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr,
label=label, secondary_y=secondary_y,
**kwds)
_shared_docs['boxplot'] = """
    Make a box plot from DataFrame columns, optionally grouped by some other
    columns or inputs.
Parameters
----------
data : the pandas object holding the data
column : column name or list of names, or vector
Can be any valid input to groupby
by : string or sequence
Column in the DataFrame to group by
ax : Matplotlib axes object, optional
fontsize : int or string
rot : label rotation angle
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
layout : tuple (optional)
(rows, columns) for the layout of the plot
return_type : {None, 'axes', 'dict', 'both'}, default None
        The kind of object to return. The default is ``axes``.
'axes' returns the matplotlib axes the boxplot is drawn on;
'dict' returns a dictionary whose values are the matplotlib
Lines of the boxplot;
'both' returns a namedtuple with the axes and dict.
When grouping with ``by``, a Series mapping columns to ``return_type``
is returned, unless ``return_type`` is None, in which case a NumPy
array of axes is returned with the same shape as ``layout``.
See the prose documentation for more.
kwds : other plotting keyword arguments to be passed to matplotlib boxplot
function
Returns
-------
lines : dict
ax : matplotlib Axes
(ax, lines): namedtuple
Notes
-----
Use ``return_type='dict'`` when you want to tweak the appearance
of the lines after plotting. In this case a dict containing the Lines
making up the boxes, caps, fliers, medians, and whiskers is returned.
"""
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot(data, column=None, by=None, ax=None, fontsize=None,
rot=0, grid=True, figsize=None, layout=None, return_type=None,
**kwds):
# validate return_type:
if return_type not in BoxPlot._valid_return_types:
raise ValueError("return_type must be {'axes', 'dict', 'both'}")
from pandas import Series, DataFrame
if isinstance(data, Series):
data = DataFrame({'x': data})
column = 'x'
def _get_colors():
return _get_standard_colors(color=kwds.get('color'), num_colors=1)
def maybe_color_bp(bp):
if 'color' not in kwds:
from matplotlib.artist import setp
setp(bp['boxes'], color=colors[0], alpha=1)
setp(bp['whiskers'], color=colors[0], alpha=1)
setp(bp['medians'], color=colors[2], alpha=1)
def plot_group(keys, values, ax):
keys = [pprint_thing(x) for x in keys]
values = [np.asarray(remove_na_arraylike(v)) for v in values]
bp = ax.boxplot(values, **kwds)
if fontsize is not None:
ax.tick_params(axis='both', labelsize=fontsize)
if kwds.get('vert', 1):
ax.set_xticklabels(keys, rotation=rot)
else:
ax.set_yticklabels(keys, rotation=rot)
maybe_color_bp(bp)
# Return axes in multiplot case, maybe revisit later # 985
if return_type == 'dict':
return bp
elif return_type == 'both':
return BoxPlot.BP(ax=ax, lines=bp)
else:
return ax
colors = _get_colors()
if column is None:
columns = None
else:
if isinstance(column, (list, tuple)):
columns = column
else:
columns = [column]
if by is not None:
# Prefer array return type for 2-D plots to match the subplot layout
# https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580
result = _grouped_plot_by_column(plot_group, data, columns=columns,
by=by, grid=grid, figsize=figsize,
ax=ax, layout=layout,
return_type=return_type)
else:
if return_type is None:
return_type = 'axes'
if layout is not None:
raise ValueError("The 'layout' keyword is not supported when "
"'by' is None")
if ax is None:
rc = {'figure.figsize': figsize} if figsize is not None else {}
ax = _gca(rc)
data = data._get_numeric_data()
if columns is None:
columns = data.columns
else:
data = data[columns]
result = plot_group(columns, data.values.T, ax)
ax.grid(grid)
return result
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot_frame(self, column=None, by=None, ax=None, fontsize=None, rot=0,
grid=True, figsize=None, layout=None,
return_type=None, **kwds):
import matplotlib.pyplot as plt
_setup()
ax = boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize,
grid=grid, rot=rot, figsize=figsize, layout=layout,
return_type=return_type, **kwds)
plt.draw_if_interactive()
return ax
def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False,
**kwargs):
"""
Make a scatter plot from two DataFrame columns
Parameters
----------
data : DataFrame
x : Column name for the x-axis values
y : Column name for the y-axis values
ax : Matplotlib axis object
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
kwargs : other plotting keyword arguments
To be passed to scatter function
Returns
-------
fig : matplotlib.Figure
"""
import matplotlib.pyplot as plt
kwargs.setdefault('edgecolors', 'none')
def plot_group(group, ax):
xvals = group[x].values
yvals = group[y].values
ax.scatter(xvals, yvals, **kwargs)
ax.grid(grid)
if by is not None:
fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
else:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
plot_group(data, ax)
ax.set_ylabel(pprint_thing(y))
ax.set_xlabel(pprint_thing(x))
ax.grid(grid)
return fig
def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
sharey=False, figsize=None, layout=None, bins=10, **kwds):
"""
Draw histogram of the DataFrame's series using matplotlib / pylab.
Parameters
----------
data : DataFrame
column : string or sequence
If passed, will be used to limit data to a subset of columns
by : object, optional
If passed, then used to form histograms for separate groups
grid : boolean, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
ax : matplotlib axes object, default None
sharex : boolean, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
is passed in; Be aware, that passing in both an ax and sharex=True
will alter all x axis labels for all subplots in a figure!
sharey : boolean, default False
In case subplots=True, share y axis and set some y axis labels to
invisible
figsize : tuple
        The size in inches of the figure to create
layout : tuple, optional
Tuple of (rows, columns) for the layout of the histograms
bins : integer, default 10
Number of histogram bins to be used
kwds : other plotting keyword arguments
To be passed to hist function
"""
_setup()
if by is not None:
axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid,
figsize=figsize, sharex=sharex, sharey=sharey,
layout=layout, bins=bins, xlabelsize=xlabelsize,
xrot=xrot, ylabelsize=ylabelsize,
yrot=yrot, **kwds)
return axes
if column is not None:
if not isinstance(column, (list, np.ndarray, Index)):
column = [column]
data = data[column]
data = data._get_numeric_data()
naxes = len(data.columns)
fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False,
sharex=sharex, sharey=sharey, figsize=figsize,
layout=layout)
_axes = _flatten(axes)
for i, col in enumerate(_try_sort(data.columns)):
ax = _axes[i]
ax.hist(data[col].dropna().values, bins=bins, **kwds)
ax.set_title(col)
ax.grid(grid)
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
fig.subplots_adjust(wspace=0.3, hspace=0.3)
return axes
def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, figsize=None,
bins=10, **kwds):
"""
Draw histogram of the input series using matplotlib
Parameters
----------
by : object, optional
If passed, then used to form histograms for separate groups
ax : matplotlib axis object
If not passed, uses gca()
grid : boolean, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
figsize : tuple, default None
        figure size in inches
bins: integer, default 10
Number of histogram bins to be used
kwds : keywords
To be passed to the actual plotting function
Notes
-----
See matplotlib documentation online for more on this
"""
import matplotlib.pyplot as plt
if by is None:
if kwds.get('layout', None) is not None:
raise ValueError("The 'layout' keyword is not supported when "
"'by' is None")
# hack until the plotting interface is a bit more unified
fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else
plt.figure(figsize=figsize))
if (figsize is not None and tuple(figsize) !=
tuple(fig.get_size_inches())):
fig.set_size_inches(*figsize, forward=True)
if ax is None:
ax = fig.gca()
elif ax.get_figure() != fig:
raise AssertionError('passed axis not bound to passed figure')
values = self.dropna().values
ax.hist(values, bins=bins, **kwds)
ax.grid(grid)
axes = np.array([ax])
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
else:
if 'figure' in kwds:
raise ValueError("Cannot pass 'figure' when using the "
"'by' argument, since a new 'Figure' instance "
"will be created")
axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize,
bins=bins, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot, **kwds)
if hasattr(axes, 'ndim'):
if axes.ndim == 1 and len(axes) == 1:
return axes[0]
return axes
def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None,
layout=None, sharex=False, sharey=False, rot=90, grid=True,
xlabelsize=None, xrot=None, ylabelsize=None, yrot=None,
**kwargs):
"""
Grouped histogram
Parameters
----------
data: Series/DataFrame
column: object, optional
by: object, optional
ax: axes, optional
bins: int, default 50
figsize: tuple, optional
layout: optional
sharex: boolean, default False
sharey: boolean, default False
rot: int, default 90
grid: bool, default True
kwargs: dict, keyword arguments passed to matplotlib.Axes.hist
Returns
-------
axes: collection of Matplotlib Axes
"""
def plot_group(group, ax):
ax.hist(group.dropna().values, bins=bins, **kwargs)
xrot = xrot or rot
fig, axes = _grouped_plot(plot_group, data, column=column,
by=by, sharex=sharex, sharey=sharey, ax=ax,
figsize=figsize, layout=layout, rot=rot)
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9,
hspace=0.5, wspace=0.3)
return axes
def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
rot=0, grid=True, ax=None, figsize=None,
layout=None, **kwds):
"""
Make box plots from DataFrameGroupBy data.
Parameters
----------
grouped : Grouped DataFrame
subplots :
* ``False`` - no subplots will be used
* ``True`` - create a subplot for each group
column : column name or list of names, or vector
Can be any valid input to groupby
fontsize : int or string
rot : label rotation angle
grid : Setting this to True will show the grid
ax : Matplotlib axis object, default None
figsize : A tuple (width, height) in inches
layout : tuple (optional)
(rows, columns) for the layout of the plot
kwds : other plotting keyword arguments to be passed to matplotlib boxplot
function
Returns
-------
dict of key/value = group key/DataFrame.boxplot return value
    or DataFrame.boxplot return value when ``subplots=False``
Examples
--------
>>> import pandas
>>> import numpy as np
>>> import itertools
>>>
>>> tuples = [t for t in itertools.product(range(1000), range(4))]
>>> index = pandas.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
>>> data = np.random.randn(len(index),4)
>>> df = pandas.DataFrame(data, columns=list('ABCD'), index=index)
>>>
>>> grouped = df.groupby(level='lvl1')
>>> boxplot_frame_groupby(grouped)
>>>
>>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1)
>>> boxplot_frame_groupby(grouped, subplots=False)
"""
_setup()
if subplots is True:
naxes = len(grouped)
fig, axes = _subplots(naxes=naxes, squeeze=False,
ax=ax, sharex=False, sharey=True,
figsize=figsize, layout=layout)
axes = _flatten(axes)
from pandas.core.series import Series
ret = Series()
for (key, group), ax in zip(grouped, axes):
d = group.boxplot(ax=ax, column=column, fontsize=fontsize,
rot=rot, grid=grid, **kwds)
ax.set_title(pprint_thing(key))
ret.loc[key] = d
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1,
right=0.9, wspace=0.2)
else:
from pandas.core.reshape.concat import concat
keys, frames = zip(*grouped)
if grouped.axis == 0:
df = concat(frames, keys=keys, axis=1)
else:
if len(frames) > 1:
df = frames[0].join(frames[1::])
else:
df = frames[0]
ret = df.boxplot(column=column, fontsize=fontsize, rot=rot,
grid=grid, ax=ax, figsize=figsize,
layout=layout, **kwds)
return ret
def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,
figsize=None, sharex=True, sharey=True, layout=None,
rot=0, ax=None, **kwargs):
from pandas import DataFrame
if figsize == 'default':
# allowed to specify mpl default with 'default'
warnings.warn("figsize='default' is deprecated. Specify figure"
"size by tuple instead", FutureWarning, stacklevel=4)
figsize = None
grouped = data.groupby(by)
if column is not None:
grouped = grouped[column]
naxes = len(grouped)
fig, axes = _subplots(naxes=naxes, figsize=figsize,
sharex=sharex, sharey=sharey, ax=ax,
layout=layout)
_axes = _flatten(axes)
for i, (key, group) in enumerate(grouped):
ax = _axes[i]
if numeric_only and isinstance(group, DataFrame):
group = group._get_numeric_data()
plotf(group, ax, **kwargs)
ax.set_title(pprint_thing(key))
return fig, axes
def _grouped_plot_by_column(plotf, data, columns=None, by=None,
numeric_only=True, grid=False,
figsize=None, ax=None, layout=None,
return_type=None, **kwargs):
grouped = data.groupby(by)
if columns is None:
if not isinstance(by, (list, tuple)):
by = [by]
columns = data._get_numeric_data().columns.difference(by)
naxes = len(columns)
fig, axes = _subplots(naxes=naxes, sharex=True, sharey=True,
figsize=figsize, ax=ax, layout=layout)
_axes = _flatten(axes)
ax_values = []
for i, col in enumerate(columns):
ax = _axes[i]
gp_col = grouped[col]
keys, values = zip(*gp_col)
re_plotf = plotf(keys, values, ax, **kwargs)
ax.set_title(col)
ax.set_xlabel(pprint_thing(by))
ax_values.append(re_plotf)
ax.grid(grid)
from pandas.core.series import Series
result = Series(ax_values, index=columns)
# Return axes in multiplot case, maybe revisit later # 985
if return_type is None:
result = axes
byline = by[0] if len(by) == 1 else by
fig.suptitle('Boxplot grouped by %s' % byline)
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
return result
class BasePlotMethods(PandasObject):
def __init__(self, data):
self._data = data
def __call__(self, *args, **kwargs):
raise NotImplementedError
class SeriesPlotMethods(BasePlotMethods):
"""Series plotting accessor and method
Examples
--------
>>> s.plot.line()
>>> s.plot.bar()
>>> s.plot.hist()
Plotting methods can also be accessed by calling the accessor as a method
with the ``kind`` argument:
``s.plot(kind='line')`` is equivalent to ``s.plot.line()``
"""
def __call__(self, kind='line', ax=None,
figsize=None, use_index=True, title=None, grid=None,
legend=False, style=None, logx=False, logy=False,
loglog=False, xticks=None, yticks=None,
xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
label=None, secondary_y=False, **kwds):
return plot_series(self._data, kind=kind, ax=ax, figsize=figsize,
use_index=use_index, title=title, grid=grid,
legend=legend, style=style, logx=logx, logy=logy,
loglog=loglog, xticks=xticks, yticks=yticks,
xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize,
colormap=colormap, table=table, yerr=yerr,
xerr=xerr, label=label, secondary_y=secondary_y,
**kwds)
__call__.__doc__ = plot_series.__doc__
def line(self, **kwds):
"""
Line plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='line', **kwds)
def bar(self, **kwds):
"""
Vertical bar plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='bar', **kwds)
def barh(self, **kwds):
"""
Horizontal bar plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='barh', **kwds)
def box(self, **kwds):
"""
Boxplot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='box', **kwds)
def hist(self, bins=10, **kwds):
"""
Histogram
.. versionadded:: 0.17.0
Parameters
----------
bins: integer, default 10
Number of histogram bins to be used
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='hist', bins=bins, **kwds)
def kde(self, **kwds):
"""
Kernel Density Estimate plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='kde', **kwds)
density = kde
def area(self, **kwds):
"""
Area plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='area', **kwds)
def pie(self, **kwds):
"""
Pie chart
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='pie', **kwds)
class FramePlotMethods(BasePlotMethods):
"""DataFrame plotting accessor and method
Examples
--------
>>> df.plot.line()
>>> df.plot.scatter('x', 'y')
>>> df.plot.hexbin()
These plotting methods can also be accessed by calling the accessor as a
method with the ``kind`` argument:
``df.plot(kind='line')`` is equivalent to ``df.plot.line()``
"""
def __call__(self, x=None, y=None, kind='line', ax=None,
subplots=False, sharex=None, sharey=False, layout=None,
figsize=None, use_index=True, title=None, grid=None,
legend=True, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
secondary_y=False, sort_columns=False, **kwds):
return plot_frame(self._data, kind=kind, x=x, y=y, ax=ax,
subplots=subplots, sharex=sharex, sharey=sharey,
layout=layout, figsize=figsize, use_index=use_index,
title=title, grid=grid, legend=legend, style=style,
logx=logx, logy=logy, loglog=loglog, xticks=xticks,
yticks=yticks, xlim=xlim, ylim=ylim, rot=rot,
fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr, secondary_y=secondary_y,
sort_columns=sort_columns, **kwds)
__call__.__doc__ = plot_frame.__doc__
def line(self, x=None, y=None, **kwds):
"""
Line plot
.. versionadded:: 0.17.0
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='line', x=x, y=y, **kwds)
def bar(self, x=None, y=None, **kwds):
"""
Vertical bar plot
.. versionadded:: 0.17.0
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='bar', x=x, y=y, **kwds)
def barh(self, x=None, y=None, **kwds):
"""
Horizontal bar plot
.. versionadded:: 0.17.0
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='barh', x=x, y=y, **kwds)
def box(self, by=None, **kwds):
r"""
Boxplot
.. versionadded:: 0.17.0
Parameters
----------
by : string or sequence
Column in the DataFrame to group by.
\*\*kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='box', by=by, **kwds)
def hist(self, by=None, bins=10, **kwds):
"""
Histogram
.. versionadded:: 0.17.0
Parameters
----------
by : string or sequence
Column in the DataFrame to group by.
bins: integer, default 10
Number of histogram bins to be used
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='hist', by=by, bins=bins, **kwds)
def kde(self, **kwds):
"""
Kernel Density Estimate plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='kde', **kwds)
density = kde
def area(self, x=None, y=None, **kwds):
"""
Area plot
.. versionadded:: 0.17.0
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='area', x=x, y=y, **kwds)
def pie(self, y=None, **kwds):
"""
Pie chart
.. versionadded:: 0.17.0
Parameters
----------
y : label or position, optional
Column to plot.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='pie', y=y, **kwds)
def scatter(self, x, y, s=None, c=None, **kwds):
"""
Scatter plot
.. versionadded:: 0.17.0
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
s : scalar or array_like, optional
Size of each point.
c : label or position, optional
Color of each point.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='scatter', x=x, y=y, c=c, s=s, **kwds)
def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None,
**kwds):
"""
Hexbin plot
.. versionadded:: 0.17.0
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
C : label or position, optional
The value at each `(x, y)` point.
reduce_C_function : callable, optional
Function of one argument that reduces all the values in a bin to
a single number (e.g. `mean`, `max`, `sum`, `std`).
gridsize : int, optional
Number of bins.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
if reduce_C_function is not None:
kwds['reduce_C_function'] = reduce_C_function
if gridsize is not None:
kwds['gridsize'] = gridsize
return self(kind='hexbin', x=x, y=y, C=C, **kwds)
| apache-2.0 |
dennissergeev/classcode | lib/bright_lapse_toa.py | 1 | 7464 | """
modify day22_radiance.py so that the lapse rate dT_dz is a vector
Loop over the set of lapse rates and save the TOA radiances
for our retrieval exercise
"""
import numpy as np
import matplotlib.pyplot as plt
from planck import planckwavelen,planckInvert
#
# OrderedDict keeps keys in order
#
from collections import OrderedDict
try:
import seaborn
except:
pass
def hydrostat(T_surf,p_surf,dT_dz,delta_z,num_levels):
"""
build a hydrostatic atmosphere by integrating the hydrostatic equation from the surface,
    using num_layers = num_levels - 1 layers of constant thickness delta_z
input: T_surf -- surface temperature in K
p_surf -- surface pressure in Pa
dT_dz -- constant rate of temperature change with height in K/m
delta_z -- layer thickness in m
num_levels -- number of levels in the atmosphere
output:
       numpy arrays: Temp (K), press (Pa), rho (kg/m^3), height (m)
"""
Rd=287. #J/kg/K -- gas constant for dry air
g=9.8 #m/s^2
Temp=np.empty([num_levels])
press=np.empty_like(Temp)
rho=np.empty_like(Temp)
height=np.empty_like(Temp)
#
# layer 0 sits directly above the surface, so start
# with pressure, temp of air equal to ground temp, press
    # and get density from the equation of state
#
press[0]=p_surf
Temp[0]=T_surf
rho[0]=p_surf/(Rd*T_surf)
height[0]=0
num_layers=num_levels-1
#now march up the atmosphere a layer at a time
for i in range(num_layers):
delP= -rho[i]*g*delta_z
height[i+1] = height[i] + delta_z
Temp[i+1] = Temp[i] + dT_dz*delta_z
press[i+1]= press[i] + delP
rho[i+1]=press[i+1]/(Rd*Temp[i+1])
return (Temp,press,rho,height)
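# A minimal sketch (added, not in the original script) of the single
# hydrostatic step used in the loop above: dp = -rho*g*delta_z with the
# ideal-gas density rho = p/(Rd*T).  The default surface values below are
# illustrative only.
def _example_hydrostat_step(T=288., p=100.e3, dT_dz=-6.5e-3, delta_z=500.):
    """Advance one layer; return (Temp, press) at the next level."""
    Rd = 287.  # J/kg/K -- gas constant for dry air
    g = 9.8    # m/s^2
    rho = p / (Rd * T)  # ideal-gas density at the layer base
    return (T + dT_dz * delta_z, p - rho * g * delta_z)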
def find_tau(r_gas,k_lambda,rho,height):
"""
input: r_gas -- gas mixing ratio in kg/kg
k_lambda -- mass absorption coefficient in kg/m^2
rho -- vector of air densities in kg/m^3
height -- corresponding layer heights in m
output: tau -- vetical optical depth from the surface, same shape as rho
"""
tau=np.empty_like(rho)
tau[0]=0
num_levels=len(rho)
num_layers=num_levels-1
for index in range(num_layers):
delta_z=height[index+1] - height[index]
delta_tau=r_gas*rho[index]*k_lambda*delta_z
tau[index+1]=tau[index] + delta_tau
return tau
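# Illustrative check (added): for constant density the accumulation in
# find_tau reduces to tau = r_gas*k_lambda*rho*z, so the direct-beam
# transmission follows Beer's law, t = exp(-tau).  Uses the module-level
# numpy import; the default values are made up.
def _example_tau_constant_rho(r_gas=0.01, k_lambda=0.01, rho=1.2, z=1000.):
    tau = r_gas * k_lambda * rho * z  # vertical optical depth
    return tau, np.exp(-tau)          # (tau, transmission)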
def top_radiance(tau,Temp,height,T_surf,wavel,k_lambda):
"""Input:
tau: vector of level optical depths
Temp: vector of level temperatures (K)
height: vector of level heights (m)
T_surf: temperature of black surface (K)
    wavel: wavelength (m)
k_lambda: mass absorption coefficient (m^2/kg)
Output:
top_rad: radiance at top of atmosphere (W/m^2/micron/sr)
"""
sfc_rad=planckwavelen(wavel,T_surf)
up_rad=sfc_rad
print "-"*60
print "wavelength: %8.2f microns" % (wavel*1.e6)
print "surface radiation: %8.2f W/m^2/micron/sr" % (up_rad*1.e-6)
print "total tau: %8.2f" % tau[-1]
print "-"*60
tot_levs=len(tau)
for index in np.arange(1,tot_levs):
upper_lev=index
lower_lev=index - 1
del_tau=tau[upper_lev] - tau[lower_lev]
trans=np.exp(-del_tau)
emiss=1 - trans
layer_rad=emiss*planckwavelen(wavel,Temp[lower_lev])
#
# find the radiance at the next level
#
up_rad=trans*up_rad + layer_rad
return up_rad
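# Sketch (added) of the isothermal-layer Schwarzschild step performed inside
# the loop of top_radiance: the radiance leaving a layer is the attenuated
# incoming radiance plus the layer's own emission, emiss*B(T_layer).  Relies
# on the module-level numpy and planckwavelen imports.
def _example_schwarzschild_step(rad_below, del_tau, wavel, T_layer):
    trans = np.exp(-del_tau)  # layer transmissivity
    emiss = 1. - trans        # layer emissivity (Kirchhoff's law)
    return trans * rad_below + emiss * planckwavelen(wavel, T_layer)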
if __name__=="__main__":
r_gas=0.01 #kg/kg
T_surf=300 #K
p_surf=100.e3 #Pa
dT_dz= np.arange(-9.e-3,-4.e-3,0.5e-3)
delta_z=25000/7
num_levels=7
#
# try to duplicate weighting functions for WH fig. 4.33
#
wavenums=np.linspace(666,766,7)
wavelengths=(1/wavenums)*1.e4 #microns
#
# we want most absorbing channel at 15 microns, so reverse
# the order of the wavelengths so that 15 microns is
# at the end
#
wavelengths=wavelengths[::-1]
rad_profs=[]
bright_profs=[]
for the_lapse_rate in dT_dz:
print "looping: "
Temp,press,rho,height=hydrostat(T_surf,p_surf,the_lapse_rate,delta_z,num_levels)
#
# I played around with the magnitude of k_lambda until the weighting functions
# peaked at a range of heights
#
k_lambda=np.array([0.002,0.003,0.006,0.010,0.012,0.016,0.020])*5.
wavel_k_tup=zip(wavelengths,k_lambda)
rad_dict=OrderedDict()
bright_dict=OrderedDict()
for wavel,k_lambda in wavel_k_tup:
tau=find_tau(r_gas,k_lambda,rho,height)
            # convert wavel to meters
rad_value=top_radiance(tau,Temp,height,T_surf,wavel*1.e-6,k_lambda)
rad_dict[wavel]=rad_value
bright_dict[wavel]=planckInvert(wavel*1.e-6,rad_value)
rad_profs.append(rad_dict)
bright_profs.append(bright_dict)
plt.close('all')
fig1,axis1=plt.subplots(1,1)
for index,the_profile in enumerate(rad_profs):
wavelengths=the_profile.keys()
radiances=np.array(the_profile.values())
radiances=radiances/radiances.mean()
#
# convert dT_dz to K/km
#
axis1.plot(wavelengths,radiances,label=str(dT_dz[index]*1.e3))
axis1.set_title('normalized radiances at top of atmosphere for {} values of dT/dz (K/km)'.format(len(dT_dz)))
axis1.set_ylabel('normalized radiances (no units)')
axis1.set_xlabel('wavelength (microns)')
axis1.legend(loc='best')
fig1.savefig('normalized_radiances.png')
fig2,axis2=plt.subplots(1,1)
for index,the_profile in enumerate(bright_profs):
wavelengths=the_profile.keys()
brights=np.array(the_profile.values())
#
# convert dT_dz to K/km
#
axis2.plot(wavelengths,brights,label=str(dT_dz[index]*1.e3))
axis2.set_title('brightness temperatures at top of atmosphere for {} values of dT/dz (K/km)'.format(len(dT_dz)))
axis2.set_ylabel('Brightness temperature (K)')
axis2.set_xlabel('wavelength (microns)')
axis2.legend(loc='best')
fig2.savefig('brightness.png')
fig3,axis3=plt.subplots(1,1)
lapse_rate=[]
diff_list=[]
for index,the_profile in enumerate(bright_profs):
brights=np.array(the_profile.values())
#
# dT_dz in K/km
#
lapse_rate.append(dT_dz[index]*1.e3)
diff_list.append(brights[0] - brights[-1])
axis3.plot(lapse_rate,diff_list)
axis3.set_title('13 $\mu m$ - 15 $\mu m$ brightness temperature difference (K)')
    axis3.set_ylabel('Brightness temperature difference (K)')
axis3.set_xlabel('lapse rate (K/km)')
fig3.savefig('temp_diff.png')
fig4,axis4=plt.subplots(1,1)
lapse_rate=[]
diff_list=[]
for index,the_profile in enumerate(bright_profs):
brights=np.array(the_profile.values())
#
# dT_dz in K/km
#
lapse_rate.append(dT_dz[index]*1.e3)
diff_list.append(2.*(brights[0] - brights[-1])/(brights[0] + brights[-1]))
axis4.plot(lapse_rate,diff_list)
axis4.set_title('$(T_{13} - T_{15})/ ( \overline{T_{bright}} )$')
    axis4.set_ylabel('normalized brightness temperature difference (no units)')
axis4.set_xlabel('lapse rate (K/km)')
fig4.savefig('normalized_temp_diff.png')
plt.show()
| cc0-1.0 |
imaculate/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
    # Test that passing an AgglomerativeClustering instance as n_clusters
    # gives the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
    # Test that a threshold yielding fewer subclusters than n_clusters
    # raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
    # Test that the leaf subclusters have a radius no greater than the threshold
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
suiyuan2009/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/in_memory_source_test.py | 62 | 3960 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests NumpySource and PandasSource."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class NumpySourceTestCase(test.TestCase):
def testNumpySource(self):
batch_size = 3
iterations = 1000
array = np.arange(32).reshape([16, 2])
numpy_source = in_memory_source.NumpySource(array, batch_size=batch_size)
index_column = numpy_source().index
value_column = numpy_source().value
cache = {}
with ops.Graph().as_default():
value_tensor = value_column.build(cache)
index_tensor = index_column.build(cache)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
expected_index = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_value = get_rows(array, expected_index)
actual_index, actual_value = sess.run([index_tensor, value_tensor])
np.testing.assert_array_equal(expected_index, actual_index)
np.testing.assert_array_equal(expected_value, actual_value)
coord.request_stop()
coord.join(threads)
class PandasSourceTestCase(test.TestCase):
def testPandasFeeding(self):
if not HAS_PANDAS:
return
batch_size = 3
iterations = 1000
index = np.arange(100, 132)
a = np.arange(32)
b = np.arange(32, 64)
dataframe = pd.DataFrame({"a": a, "b": b}, index=index)
pandas_source = in_memory_source.PandasSource(
dataframe, batch_size=batch_size)
pandas_columns = pandas_source()
cache = {}
with ops.Graph().as_default():
pandas_tensors = [col.build(cache) for col in pandas_columns]
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
indices = [
j % dataframe.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = dataframe.index[indices]
expected_rows = dataframe.iloc[indices]
actual_value = sess.run(pandas_tensors)
np.testing.assert_array_equal(expected_df_indices, actual_value[0])
for col_num, col in enumerate(dataframe.columns):
np.testing.assert_array_equal(expected_rows[col].values,
actual_value[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
IndraVikas/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
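# Note (added): for p=2, dist_func reduces to the Euclidean distance; the
# pyfunc test at the bottom of this file relies on that when comparing
# against the built-in "euclidean" metric.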
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
derrowap/MA490-MachineLearning-FinalProject | threadTesting.py | 1 | 2773 | from tensorflow.contrib import skflow
from sklearn.metrics import accuracy_score
from sklearn.cross_validation import train_test_split
from trainingFunctions import addThem
import numpy as np
import threading
class speciesThread(threading.Thread):
    def __init__(self, ID, x, y):
        threading.Thread.__init__(self)
        self.ID = ID
        self.x = x
        self.y = y
        self.error = 2
    def run(self):
        self.error = train(self.ID, self.x, self.y)
print("Species %d finished with testing error %f"
% (self.ID, self.error))
# goal = 0.01
# current = 1
# numThreads = 60
# while current > goal:
def trainNet(index):
# numThreads = high - low + 1
x = np.zeros((10000 ** 2, 2))
y = np.zeros(10000 ** 2)
count = 0
for i in range(1, 10000):
for j in range(i, 10000):
x[count] = [i, j]
y[count] = addThem(i, j)
count += 1
# x_train, x_test, y_train, y_test = train_test_split(x, y,
# test_size=0.2, random_state=0)
generations = 0
NN = skflow.TensorFlowEstimator.restore('/home/derrowap/models/addThem'+str(index))
bestError = 2
while bestError > 0.001:
generations += 1
# pool = [speciesThread(i, x, y) for i in range(low, high+1)]
# for species in pool:
# species.start()
# for species in pool:
# species.join()
# error = [2] * numThreads
# for i in range(numThreads):
# error[i] = pool[i].error
# NN = skflow.TensorFlowEstimator.restore('/home/derrowap/models/addThem'+str(ID))
NN.fit(x, y)
pred = NN.predict(x)
pred = np.reshape(pred, -1)
pred = np.rint(pred)
error_test = 1 - accuracy_score(y, pred)
# Update best error so far
bestError = min(bestError, error_test)
# bestIndex = np.argmin(error)
# bestNN = skflow.TensorFlowEstimator.restore('/home/derrowap/models/addThem'+str(pool[bestIndex].ID))
# for i in range(low, high+1):
# bestNN.save('/home/derrowap/models/addThem'+str(i))
print("Error on generation %d: %f" % (generations, error_test))
print("Best error so far: %f" % bestError)
print("Finished generation %d, continuing...." % generations)
print("Finished training! Error %f, generations %d." % (bestError, generations))
def train(ID, x, y):
# Neural Network from skflow
try:
NN = skflow.TensorFlowEstimator.restore('/home/derrowap/models/addThem'+str(ID))
except:
print("ID %d didn't load" % ID)
NN = skflow.TensorFlowDNNRegressor(hidden_units=[2], steps=100000)
# Train the NN with training data
NN.fit(x, y)
# Calculates training error
# pred = NN.predict(x_train)
# pred = np.reshape(pred, -1)
# pred = np.rint(pred)
# error_train = 1 - accuracy_score(y_train, pred)
# Calculates testing error
pred = NN.predict(x)
pred = np.reshape(pred, -1)
pred = np.rint(pred)
error_test = 1 - accuracy_score(y, pred)
NN.save('/home/derrowap/models/addThem'+str(ID))
return(error_test) | mit |
barbagroup/conferences | GTC2016/plots/scripts/PoissonN100M2D.py | 1 | 2330 | '''
Nx = 10000, Ny = 10000, 2D Poisson Benchmarks
'''
import numpy
from matplotlib import pyplot
pyplot.style.use('style')
nCPU = numpy.array([16, 32, 64, 128])
Hypre = numpy.array([27.125073, 15.487189, 7.924572, 4.094746])
nGPU = numpy.array([32])
AmgX = numpy.array([1.538988])
#nTheo = numpy.array([])
#AmgXTheo = numpy.array([])
# ===========================
# plot
# ===========================
fig = pyplot.figure(figsize=(10, 5))
ax = fig.gca()
color_cycle = ax._get_lines.prop_cycler
start = 1
tickLoc = []
tickLabel = []
bw = 0.5
# Hypre
bLoc = numpy.arange(start, nCPU.size+start) - 0.25
ax.bar(bLoc, Hypre, bw, label="Hypre BoomerAMG (Intel E5-2670)",
color=next(color_cycle)['color'], lw=0, zorder=0)
## speed up text
for i in range(nCPU.size):
ax.annotate(str(numpy.around(Hypre[0]/Hypre[i], 1))+"x",
xy=(bLoc[i], Hypre[i]),
xytext=(bLoc[i], Hypre[i]+0.5),
fontsize=18, weight="bold", color="red")
start += (nCPU.size+1)
tickLoc += list(bLoc+0.5)
tickLabel += [str(i)+" CPU" for i in nCPU]
# AmgX / ColonialOne
bLoc = numpy.arange(start, nGPU.size+start) - 0.25
ax.bar(bLoc, AmgX, bw, label="AmgX Classical AMG (NVIDIA K20)",
color=next(color_cycle)['color'], lw=0, zorder=0)
## speed up text
for i in range(nGPU.size):
ax.annotate(str(numpy.around(Hypre[0]/AmgX[i], 1))+"x",
xy=(bLoc[i], AmgX[i]),
xytext=(bLoc[i], AmgX[i]+0.5),
fontsize=18, weight="bold", color="red")
start += (nGPU.size+1)
tickLoc += list(bLoc+0.5)
tickLabel += [str(i)+" GPU" for i in nGPU]
# AmgX / Theo(K40c)
#bLoc = numpy.arange(start, nTheo.size+start) - 0.25
#
#ax.bar(bLoc, AmgXTheo, bw, label="AmgX Classical AMG (Workstation)",
# color=next(color_cycle)['color'], lw=0, zorder=0)
#
#start += (nTheo.size+1)
#tickLoc += list(bLoc+0.5)
#tickLabel += [str(i)+" GPU" for i in nTheo]
# config the figure
ax.set_title("2D Poisson, 100M Unknowns", fontsize=22)
#ax.set_ylim(0, 45)
ax.set_ylabel("Time (sec)", fontsize=20)
ax.yaxis.grid(zorder=0)
ax.set_xlim(0, start-1)
ax.set_xticks(tickLoc)
ax.set_xticklabels(tickLabel, fontsize=16, rotation=315)
bars, labels = ax.get_legend_handles_labels()
ax.legend(bars, labels, loc=0, ncol=1, fontsize=16)
pyplot.tight_layout()
pyplot.savefig("Poisson_N100M2D.png")
| mit |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/matplotlib/quiver.py | 6 | 45113 | """
Support for plotting vector fields.
Presently this contains Quiver and Barb. Quiver plots an arrow in the
direction of the vector, with the size of the arrow related to the
magnitude of the vector.
Barbs are like quiver in that they point along a vector, but
the magnitude of the vector is given schematically by the presence of barbs
or flags on the barb.
This will also become a home for things such as standard
deviation ellipses, which can and will be derived very easily from
the Quiver code.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import weakref
import numpy as np
from numpy import ma
import matplotlib.collections as mcollections
import matplotlib.transforms as transforms
import matplotlib.text as mtext
import matplotlib.artist as martist
from matplotlib.artist import allow_rasterization
from matplotlib import docstring
import matplotlib.font_manager as font_manager
import matplotlib.cbook as cbook
from matplotlib.cbook import delete_masked_points
from matplotlib.patches import CirclePolygon
import math
_quiver_doc = """
Plot a 2-D field of arrows.
call signatures::
quiver(U, V, **kw)
quiver(U, V, C, **kw)
quiver(X, Y, U, V, **kw)
quiver(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the arrow locations (default is tail of
arrow; see *pivot* kwarg)
*U*, *V*:
Give the x and y components of the arrow vectors
*C*:
An optional array used to map colors to the arrows
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if ``len(X)`` and ``len(Y)``
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*units*: [ 'width' | 'height' | 'dots' | 'inches' | 'x' | 'y' | 'xy' ]
Arrow units; the arrow dimensions *except for length* are in
multiples of this unit.
* 'width' or 'height': the width or height of the axes
* 'dots' or 'inches': pixels or inches, based on the figure dpi
* 'x', 'y', or 'xy': *X*, *Y*, or sqrt(X^2+Y^2) data units
The arrows scale differently depending on the units. For
'x' or 'y', the arrows get larger as one zooms in; for other
units, the arrow size is independent of the zoom state. For
    'width' or 'height', the arrow size increases with the width and
height of the axes, respectively, when the window is resized;
for 'dots' or 'inches', resizing does not change the arrows.
*angles*: [ 'uv' | 'xy' | array ]
With the default 'uv', the arrow axis aspect ratio is 1, so that
if *U*==*V* the orientation of the arrow on the plot is 45 degrees
CCW from the horizontal axis (positive to the right).
With 'xy', the arrow points from (x,y) to (x+u, y+v).
Use this for plotting a gradient field, for example.
Alternatively, arbitrary angles may be specified as an array
of values in degrees, CCW from the horizontal axis.
Note: inverting a data axis will correspondingly invert the
arrows *only* with `angles='xy'`.
*scale*: [ *None* | float ]
Data units per arrow length unit, e.g., m/s per plot width; a smaller
scale parameter makes the arrow longer. If *None*, a simple
autoscaling algorithm is used, based on the average vector length
and the number of vectors. The arrow length unit is given by
the *scale_units* parameter
*scale_units*: *None*, or any of the *units* options.
For example, if *scale_units* is 'inches', *scale* is 2.0, and
``(u,v) = (1,0)``, then the vector will be 0.5 inches long.
If *scale_units* is 'width', then the vector will be half the width
of the axes.
If *scale_units* is 'x' then the vector will be 0.5 x-axis
units. To plot vectors in the x-y plane, with u and v having
the same units as x and y, use
"angles='xy', scale_units='xy', scale=1".
*width*:
Shaft width in arrow units; default depends on choice of units,
above, and number of vectors; a typical starting value is about
0.005 times the width of the plot.
*headwidth*: scalar
Head width as multiple of shaft width, default is 3
*headlength*: scalar
Head length as multiple of shaft width, default is 5
*headaxislength*: scalar
Head length at shaft intersection, default is 4.5
*minshaft*: scalar
Length below which arrow scales, in units of head length. Do not
set this to less than 1, or small arrows will look terrible!
Default is 1
*minlength*: scalar
Minimum length as a multiple of shaft width; if an arrow length
is less than this, plot a dot (hexagon) of this diameter instead.
Default is 1.
*pivot*: [ 'tail' | 'mid' | 'middle' | 'tip' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*.
*color*: [ color | color sequence ]
This is a synonym for the
:class:`~matplotlib.collections.PolyCollection` facecolor kwarg.
If *C* has been set, *color* has no effect.
The defaults give a slightly swept-back arrow; to make the head a
triangle, make *headaxislength* the same as *headlength*. To make the
arrow more pointed, reduce *headwidth* or increase *headlength* and
*headaxislength*. To make the head smaller relative to the shaft,
scale down all the head parameters. You will probably do best to leave
minshaft alone.
linewidths and edgecolors can be used to customize the arrow
outlines. Additional :class:`~matplotlib.collections.PolyCollection`
keyword arguments:
%(PolyCollection)s
""" % docstring.interpd.params
_quiverkey_doc = """
Add a key to a quiver plot.
Call signature::
quiverkey(Q, X, Y, U, label, **kw)
Arguments:
*Q*:
The Quiver instance returned by a call to quiver.
*X*, *Y*:
The location of the key; additional explanation follows.
*U*:
The length of the key
*label*:
A string with the length and units of the key
Keyword arguments:
*coordinates* = [ 'axes' | 'figure' | 'data' | 'inches' ]
Coordinate system and units for *X*, *Y*: 'axes' and 'figure' are
normalized coordinate systems with 0,0 in the lower left and 1,1
in the upper right; 'data' are the axes data coordinates (used for
the locations of the vectors in the quiver plot itself); 'inches'
is position in the figure in inches, with 0,0 at the lower left
corner.
*color*:
overrides face and edge colors from *Q*.
*labelpos* = [ 'N' | 'S' | 'E' | 'W' ]
Position the label above, below, to the right, to the left of the
arrow, respectively.
*labelsep*:
Distance in inches between the arrow and the label. Default is
0.1
*labelcolor*:
defaults to default :class:`~matplotlib.text.Text` color.
*fontproperties*:
A dictionary with keyword arguments accepted by the
:class:`~matplotlib.font_manager.FontProperties` initializer:
*family*, *style*, *variant*, *size*, *weight*
Any additional keyword arguments are used to override vector
properties taken from *Q*.
The positioning of the key depends on *X*, *Y*, *coordinates*, and
*labelpos*. If *labelpos* is 'N' or 'S', *X*, *Y* give the position
of the middle of the key arrow. If *labelpos* is 'E', *X*, *Y*
positions the head, and if *labelpos* is 'W', *X*, *Y* positions the
tail; in either of these two cases, *X*, *Y* is somewhere in the
middle of the arrow+label key object.
"""
class QuiverKey(martist.Artist):
""" Labelled arrow for use as a quiver plot scale key."""
halign = {'N': 'center', 'S': 'center', 'E': 'left', 'W': 'right'}
valign = {'N': 'bottom', 'S': 'top', 'E': 'center', 'W': 'center'}
pivot = {'N': 'middle', 'S': 'middle', 'E': 'tip', 'W': 'tail'}
def __init__(self, Q, X, Y, U, label, **kw):
martist.Artist.__init__(self)
self.Q = Q
self.X = X
self.Y = Y
self.U = U
self.coord = kw.pop('coordinates', 'axes')
self.color = kw.pop('color', None)
self.label = label
self._labelsep_inches = kw.pop('labelsep', 0.1)
self.labelsep = (self._labelsep_inches * Q.ax.figure.dpi)
# try to prevent closure over the real self
weak_self = weakref.ref(self)
def on_dpi_change(fig):
self_weakref = weak_self()
if self_weakref is not None:
self_weakref.labelsep = (self_weakref._labelsep_inches*fig.dpi)
self_weakref._initialized = False # simple brute force update
# works because _init is
# called at the start of
# draw.
self._cid = Q.ax.figure.callbacks.connect('dpi_changed',
on_dpi_change)
self.labelpos = kw.pop('labelpos', 'N')
self.labelcolor = kw.pop('labelcolor', None)
self.fontproperties = kw.pop('fontproperties', dict())
self.kw = kw
_fp = self.fontproperties
# boxprops = dict(facecolor='red')
self.text = mtext.Text(
text=label, # bbox=boxprops,
horizontalalignment=self.halign[self.labelpos],
verticalalignment=self.valign[self.labelpos],
fontproperties=font_manager.FontProperties(**_fp))
if self.labelcolor is not None:
self.text.set_color(self.labelcolor)
self._initialized = False
self.zorder = Q.zorder + 0.1
def remove(self):
"""
Overload the remove method
"""
self.Q.ax.figure.callbacks.disconnect(self._cid)
self._cid = None
# pass the remove call up the stack
martist.Artist.remove(self)
__init__.__doc__ = _quiverkey_doc
def _init(self):
if True: # not self._initialized:
if not self.Q._initialized:
self.Q._init()
self._set_transform()
_pivot = self.Q.pivot
self.Q.pivot = self.pivot[self.labelpos]
# Hack: save and restore the Umask
_mask = self.Q.Umask
self.Q.Umask = ma.nomask
self.verts = self.Q._make_verts(np.array([self.U]),
np.zeros((1,)))
self.Q.Umask = _mask
self.Q.pivot = _pivot
kw = self.Q.polykw
kw.update(self.kw)
self.vector = mcollections.PolyCollection(
self.verts,
offsets=[(self.X, self.Y)],
transOffset=self.get_transform(),
**kw)
if self.color is not None:
self.vector.set_color(self.color)
self.vector.set_transform(self.Q.get_transform())
self.vector.set_figure(self.get_figure())
self._initialized = True
def _text_x(self, x):
if self.labelpos == 'E':
return x + self.labelsep
elif self.labelpos == 'W':
return x - self.labelsep
else:
return x
def _text_y(self, y):
if self.labelpos == 'N':
return y + self.labelsep
elif self.labelpos == 'S':
return y - self.labelsep
else:
return y
@allow_rasterization
def draw(self, renderer):
self._init()
self.vector.draw(renderer)
x, y = self.get_transform().transform_point((self.X, self.Y))
self.text.set_x(self._text_x(x))
self.text.set_y(self._text_y(y))
self.text.draw(renderer)
self.stale = False
def _set_transform(self):
if self.coord == 'data':
self.set_transform(self.Q.ax.transData)
elif self.coord == 'axes':
self.set_transform(self.Q.ax.transAxes)
elif self.coord == 'figure':
self.set_transform(self.Q.ax.figure.transFigure)
elif self.coord == 'inches':
self.set_transform(self.Q.ax.figure.dpi_scale_trans)
else:
raise ValueError('unrecognized coordinates')
def set_figure(self, fig):
martist.Artist.set_figure(self, fig)
self.text.set_figure(fig)
def contains(self, mouseevent):
# Maybe the dictionary should allow one to
# distinguish between a text hit and a vector hit.
if (self.text.contains(mouseevent)[0] or
self.vector.contains(mouseevent)[0]):
return True, {}
return False, {}
quiverkey_doc = _quiverkey_doc
# This is a helper function that parses out the various combinations of
# arguments for doing colored vector plots. Pulling it out here
# allows both Quiver and Barbs to use it
def _parse_args(*args):
X, Y, U, V, C = [None] * 5
args = list(args)
# The use of atleast_1d allows for handling scalar arguments while also
# keeping masked arrays
if len(args) == 3 or len(args) == 5:
C = np.atleast_1d(args.pop(-1))
V = np.atleast_1d(args.pop(-1))
U = np.atleast_1d(args.pop(-1))
if U.ndim == 1:
nr, nc = 1, U.shape[0]
else:
nr, nc = U.shape
if len(args) == 2: # remaining after removing U,V,C
X, Y = [np.array(a).ravel() for a in args]
if len(X) == nc and len(Y) == nr:
X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = [np.ravel(a) for a in indexgrid]
return X, Y, U, V, C
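# Editor's illustration (not part of the original module): when X and Y are
# omitted, the helper above generates them as a flattened meshgrid of the
# column/row indices.  A minimal sketch:
#
#   U = np.array([[1., 2.], [3., 4.]])   # nr = 2 rows, nc = 2 columns
#   V = np.zeros_like(U)
#   X, Y, U, V, C = _parse_args(U, V)
#   # X == [0, 1, 0, 1], Y == [0, 0, 1, 1], C is None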
def _check_consistent_shapes(*arrays):
all_shapes = set(a.shape for a in arrays)
if len(all_shapes) != 1:
raise ValueError('The shapes of the passed in arrays do not match.')
class Quiver(mcollections.PolyCollection):
"""
Specialized PolyCollection for arrows.
The only API method is set_UVC(), which can be used
to change the size, orientation, and color of the
arrows; their locations are fixed when the class is
instantiated. Possibly this method will be useful
in animations.
Much of the work in this class is done in the draw()
method so that as much information as possible is available
about the plot. In subsequent draw() calls, recalculation
is limited to things that might have changed, so there
should be no performance penalty from putting the calculations
in the draw() method.
"""
_PIVOT_VALS = ('tail', 'mid', 'middle', 'tip')
@docstring.Substitution(_quiver_doc)
def __init__(self, ax, *args, **kw):
"""
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%s
"""
self.ax = ax
X, Y, U, V, C = _parse_args(*args)
self.X = X
self.Y = Y
self.XY = np.hstack((X[:, np.newaxis], Y[:, np.newaxis]))
self.N = len(X)
self.scale = kw.pop('scale', None)
self.headwidth = kw.pop('headwidth', 3)
self.headlength = float(kw.pop('headlength', 5))
self.headaxislength = kw.pop('headaxislength', 4.5)
self.minshaft = kw.pop('minshaft', 1)
self.minlength = kw.pop('minlength', 1)
self.units = kw.pop('units', 'width')
self.scale_units = kw.pop('scale_units', None)
self.angles = kw.pop('angles', 'uv')
self.width = kw.pop('width', None)
self.color = kw.pop('color', 'k')
pivot = kw.pop('pivot', 'tail').lower()
# validate pivot
if pivot not in self._PIVOT_VALS:
raise ValueError(
'pivot must be one of {keys}, you passed {inp}'.format(
keys=self._PIVOT_VALS, inp=pivot))
# normalize to 'middle'
if pivot == 'mid':
pivot = 'middle'
self.pivot = pivot
self.transform = kw.pop('transform', ax.transData)
kw.setdefault('facecolors', self.color)
kw.setdefault('linewidths', (0,))
mcollections.PolyCollection.__init__(self, [], offsets=self.XY,
transOffset=self.transform,
closed=False,
**kw)
self.polykw = kw
self.set_UVC(U, V, C)
self._initialized = False
self.keyvec = None
self.keytext = None
# try to prevent closure over the real self
weak_self = weakref.ref(self)
def on_dpi_change(fig):
self_weakref = weak_self()
if self_weakref is not None:
self_weakref._new_UV = True # vertices depend on width, span
# which in turn depend on dpi
self_weakref._initialized = False # simple brute force update
# works because _init is
# called at the start of
# draw.
self._cid = self.ax.figure.callbacks.connect('dpi_changed',
on_dpi_change)
def remove(self):
"""
Overload the remove method
"""
# disconnect the call back
self.ax.figure.callbacks.disconnect(self._cid)
self._cid = None
# pass the remove call up the stack
mcollections.PolyCollection.remove(self)
def _init(self):
"""
Initialization delayed until first draw;
allow time for axes setup.
"""
# It seems that there are not enough event notifications
# available to have this work on an as-needed basis at present.
if True: # not self._initialized:
trans = self._set_transform()
ax = self.ax
sx, sy = trans.inverted().transform_point(
(ax.bbox.width, ax.bbox.height))
self.span = sx
if self.width is None:
sn = max(8, min(25, math.sqrt(self.N)))
self.width = 0.06 * self.span / sn
# _make_verts sets self.scale if not already specified
if not self._initialized and self.scale is None:
self._make_verts(self.U, self.V)
self._initialized = True
def get_datalim(self, transData):
trans = self.get_transform()
transOffset = self.get_offset_transform()
full_transform = (trans - transData) + (transOffset - transData)
XY = full_transform.transform(self.XY)
bbox = transforms.Bbox.null()
bbox.update_from_data_xy(XY, ignore=True)
return bbox
@allow_rasterization
def draw(self, renderer):
self._init()
verts = self._make_verts(self.U, self.V)
self.set_verts(verts, closed=False)
self._new_UV = False
mcollections.PolyCollection.draw(self, renderer)
self.stale = False
def set_UVC(self, U, V, C=None):
# We need to ensure we have a copy, not a reference
# to an array that might change before draw().
U = ma.masked_invalid(U, copy=True).ravel()
V = ma.masked_invalid(V, copy=True).ravel()
mask = ma.mask_or(U.mask, V.mask, copy=False, shrink=True)
if C is not None:
C = ma.masked_invalid(C, copy=True).ravel()
mask = ma.mask_or(mask, C.mask, copy=False, shrink=True)
if mask is ma.nomask:
C = C.filled()
else:
C = ma.array(C, mask=mask, copy=False)
self.U = U.filled(1)
self.V = V.filled(1)
self.Umask = mask
if C is not None:
self.set_array(C)
self._new_UV = True
self.stale = True
def _dots_per_unit(self, units):
"""
Return a scale factor for converting from units to pixels
"""
ax = self.ax
if units in ('x', 'y', 'xy'):
if units == 'x':
dx0 = ax.viewLim.width
dx1 = ax.bbox.width
elif units == 'y':
dx0 = ax.viewLim.height
dx1 = ax.bbox.height
else: # 'xy' is assumed
dxx0 = ax.viewLim.width
dxx1 = ax.bbox.width
dyy0 = ax.viewLim.height
dyy1 = ax.bbox.height
dx1 = np.hypot(dxx1, dyy1)
dx0 = np.hypot(dxx0, dyy0)
dx = dx1 / dx0
else:
if units == 'width':
dx = ax.bbox.width
elif units == 'height':
dx = ax.bbox.height
elif units == 'dots':
dx = 1.0
elif units == 'inches':
dx = ax.figure.dpi
else:
raise ValueError('unrecognized units')
return dx
def _set_transform(self):
"""
Sets the PolygonCollection transform to go
from arrow width units to pixels.
"""
dx = self._dots_per_unit(self.units)
self._trans_scale = dx # pixels per arrow width unit
trans = transforms.Affine2D().scale(dx)
self.set_transform(trans)
return trans
def _angles_lengths(self, U, V, eps=1):
xy = self.ax.transData.transform(self.XY)
uv = np.hstack((U[:, np.newaxis], V[:, np.newaxis]))
xyp = self.ax.transData.transform(self.XY + eps * uv)
dxy = xyp - xy
angles = np.arctan2(dxy[:, 1], dxy[:, 0])
lengths = np.absolute(dxy[:, 0] + dxy[:, 1] * 1j) / eps
return angles, lengths
def _make_verts(self, U, V):
uv = (U + V * 1j)
str_angles = isinstance(self.angles, six.string_types)
if str_angles and (self.angles == 'xy' and self.scale_units == 'xy'):
# Here eps is 1 so that if we get U, V by diffing
# the X, Y arrays, the vectors will connect the
# points, regardless of the axis scaling (including log).
angles, lengths = self._angles_lengths(U, V, eps=1)
elif str_angles and (self.angles == 'xy' or self.scale_units == 'xy'):
# Calculate eps based on the extents of the plot
# so that we don't end up with roundoff error from
# adding a small number to a large.
eps = np.abs(self.ax.dataLim.extents).max() * 0.001
angles, lengths = self._angles_lengths(U, V, eps=eps)
if self.scale_units == 'xy':
a = lengths
else:
a = np.absolute(uv)
if self.scale is None:
sn = max(10, math.sqrt(self.N))
if self.Umask is not ma.nomask:
amean = a[~self.Umask].mean()
else:
amean = a.mean()
# crude auto-scaling
# scale is typical arrow length as a multiple of the arrow width
scale = 1.8 * amean * sn / self.span
if self.scale_units is None:
if self.scale is None:
self.scale = scale
widthu_per_lenu = 1.0
else:
if self.scale_units == 'xy':
dx = 1
else:
dx = self._dots_per_unit(self.scale_units)
widthu_per_lenu = dx / self._trans_scale
if self.scale is None:
self.scale = scale * widthu_per_lenu
length = a * (widthu_per_lenu / (self.scale * self.width))
X, Y = self._h_arrows(length)
if str_angles and (self.angles == 'xy'):
theta = angles
elif str_angles and (self.angles == 'uv'):
theta = np.angle(uv)
else:
# Make a copy to avoid changing the input array.
theta = ma.masked_invalid(self.angles, copy=True).filled(0)
theta = theta.ravel()
theta *= (np.pi / 180.0)
theta.shape = (theta.shape[0], 1) # for broadcasting
xy = (X + Y * 1j) * np.exp(1j * theta) * self.width
xy = xy[:, :, np.newaxis]
XY = np.concatenate((xy.real, xy.imag), axis=2)
if self.Umask is not ma.nomask:
XY = ma.array(XY)
XY[self.Umask] = ma.masked
# This might be handled more efficiently with nans, given
# that nans will end up in the paths anyway.
return XY
def _h_arrows(self, length):
""" length is in arrow width units """
# It might be possible to streamline the code
# and speed it up a bit by using complex (x,y)
# instead of separate arrays; but any gain would be slight.
minsh = self.minshaft * self.headlength
N = len(length)
length = length.reshape(N, 1)
# This number is chosen based on when pixel values overflow in Agg
# causing rendering errors
# length = np.minimum(length, 2 ** 16)
np.clip(length, 0, 2 ** 16, out=length)
# x, y: normal horizontal arrow
x = np.array([0, -self.headaxislength,
-self.headlength, 0],
np.float64)
x = x + np.array([0, 1, 1, 1]) * length
y = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
y = np.repeat(y[np.newaxis, :], N, axis=0)
# x0, y0: arrow without shaft, for short vectors
x0 = np.array([0, minsh - self.headaxislength,
minsh - self.headlength, minsh], np.float64)
y0 = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
ii = [0, 1, 2, 3, 2, 1, 0, 0]
X = x.take(ii, 1)
Y = y.take(ii, 1)
Y[:, 3:-1] *= -1
X0 = x0.take(ii)
Y0 = y0.take(ii)
Y0[3:-1] *= -1
shrink = length / minsh if minsh != 0. else 0.
X0 = shrink * X0[np.newaxis, :]
Y0 = shrink * Y0[np.newaxis, :]
short = np.repeat(length < minsh, 8, axis=1)
# Now select X0, Y0 if short, otherwise X, Y
cbook._putmask(X, short, X0)
cbook._putmask(Y, short, Y0)
if self.pivot == 'middle':
X -= 0.5 * X[:, 3, np.newaxis]
elif self.pivot == 'tip':
X = X - X[:, 3, np.newaxis] # numpy bug? using -= does not
# work here unless we multiply
# by a float first, as with 'mid'.
elif self.pivot != 'tail':
raise ValueError(("Quiver.pivot must have value in {{'middle', "
"'tip', 'tail'}} not {0}").format(self.pivot))
tooshort = length < self.minlength
if tooshort.any():
# Use a heptagonal dot:
th = np.arange(0, 8, 1, np.float64) * (np.pi / 3.0)
x1 = np.cos(th) * self.minlength * 0.5
y1 = np.sin(th) * self.minlength * 0.5
X1 = np.repeat(x1[np.newaxis, :], N, axis=0)
Y1 = np.repeat(y1[np.newaxis, :], N, axis=0)
tooshort = np.repeat(tooshort, 8, 1)
cbook._putmask(X, tooshort, X1)
cbook._putmask(Y, tooshort, Y1)
# Mask handling is deferred to the caller, _make_verts.
return X, Y
quiver_doc = _quiver_doc
_barbs_doc = """
Plot a 2-D field of barbs.
Call signatures::
barb(U, V, **kw)
barb(U, V, C, **kw)
barb(X, Y, U, V, **kw)
barb(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the barb locations
(default is head of barb; see *pivot* kwarg)
*U*, *V*:
Give the x and y components of the barb shaft
*C*:
An optional array used to map colors to the barbs
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if ``len(X)`` and ``len(Y)``
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*length*:
Length of the barb in points; the other parts of the barb
are scaled against this.
Default is 9
*pivot*: [ 'tip' | 'middle' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*. Default is 'tip'
*barbcolor*: [ color | color sequence ]
Specifies the color all parts of the barb except any flags. This
        parameter is analogous to the *edgecolor* parameter for polygons,
which can be used instead. However this parameter will override
facecolor.
*flagcolor*: [ color | color sequence ]
Specifies the color of any flags on the barb. This parameter is
        analogous to the *facecolor* parameter for polygons, which can be
used instead. However this parameter will override facecolor. If
this is not set (and *C* has not either) then *flagcolor* will be
set to match *barbcolor* so that the barb has a uniform color. If
*C* has been set, *flagcolor* has no effect.
*sizes*:
A dictionary of coefficients specifying the ratio of a given
feature to the length of the barb. Only those values one wishes to
override need to be included. These features include:
- 'spacing' - space between features (flags, full/half barbs)
- 'height' - height (distance from shaft to top) of a flag or
full barb
- 'width' - width of a flag, twice the width of a full barb
- 'emptybarb' - radius of the circle used for low magnitudes
*fill_empty*:
A flag on whether the empty barbs (circles) that are drawn should
be filled with the flag color. If they are not filled, they will
be drawn such that no color is applied to the center. Default is
False
*rounding*:
A flag to indicate whether the vector magnitude should be rounded
when allocating barb components. If True, the magnitude is
rounded to the nearest multiple of the half-barb increment. If
False, the magnitude is simply truncated to the next lowest
multiple. Default is True
*barb_increments*:
A dictionary of increments specifying values to associate with
different parts of the barb. Only those values one wishes to
override need to be included.
- 'half' - half barbs (Default is 5)
- 'full' - full barbs (Default is 10)
- 'flag' - flags (default is 50)
*flip_barb*:
Either a single boolean flag or an array of booleans. Single
boolean indicates whether the lines and flags should point
opposite to normal for all barbs. An array (which should be the
same size as the other data arrays) indicates whether to flip for
each individual barb. Normal behavior is for the barbs and lines
to point right (comes from wind barbs having these features point
towards low pressure in the Northern Hemisphere.) Default is
False
Barbs are traditionally used in meteorology as a way to plot the speed
and direction of wind observations, but can technically be used to
plot any two dimensional vector quantity. As opposed to arrows, which
give vector magnitude by the length of the arrow, the barbs give more
quantitative information about the vector magnitude by putting slanted
    lines or a triangle for various increments in magnitude, as shown
schematically below::
: /\ \\
: / \ \\
: / \ \ \\
: / \ \ \\
: ------------------------------
.. note the double \\ at the end of each line to make the figure
.. render correctly
The largest increment is given by a triangle (or "flag"). After those
come full lines (barbs). The smallest increment is a half line. There
is only, of course, ever at most 1 half line. If the magnitude is
small and only needs a single half-line and no full lines or
triangles, the half-line is offset from the end of the barb so that it
can be easily distinguished from barbs with a single full line. The
magnitude for the barb shown above would nominally be 65, using the
standard increments of 50, 10, and 5.
linewidths and edgecolors can be used to customize the barb.
Additional :class:`~matplotlib.collections.PolyCollection` keyword
arguments:
%(PolyCollection)s
""" % docstring.interpd.params
docstring.interpd.update(barbs_doc=_barbs_doc)
class Barbs(mcollections.PolyCollection):
'''
Specialized PolyCollection for barbs.
The only API method is :meth:`set_UVC`, which can be used to
change the size, orientation, and color of the arrows. Locations
are changed using the :meth:`set_offsets` collection method.
Possibly this method will be useful in animations.
There is one internal function :meth:`_find_tails` which finds
exactly what should be put on the barb given the vector magnitude.
From there :meth:`_make_barbs` is used to find the vertices of the
polygon to represent the barb based on this information.
'''
# This may be an abuse of polygons here to render what is essentially maybe
# 1 triangle and a series of lines. It works fine as far as I can tell
# however.
@docstring.interpd
def __init__(self, ax, *args, **kw):
"""
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%(barbs_doc)s
"""
self._pivot = kw.pop('pivot', 'tip')
self._length = kw.pop('length', 7)
barbcolor = kw.pop('barbcolor', None)
flagcolor = kw.pop('flagcolor', None)
self.sizes = kw.pop('sizes', dict())
self.fill_empty = kw.pop('fill_empty', False)
self.barb_increments = kw.pop('barb_increments', dict())
self.rounding = kw.pop('rounding', True)
self.flip = kw.pop('flip_barb', False)
transform = kw.pop('transform', ax.transData)
        # Flagcolor and barbcolor provide convenience parameters for
# setting the facecolor and edgecolor, respectively, of the barb
# polygon. We also work here to make the flag the same color as the
# rest of the barb by default
if None in (barbcolor, flagcolor):
kw['edgecolors'] = 'face'
if flagcolor:
kw['facecolors'] = flagcolor
elif barbcolor:
kw['facecolors'] = barbcolor
else:
# Set to facecolor passed in or default to black
kw.setdefault('facecolors', 'k')
else:
kw['edgecolors'] = barbcolor
kw['facecolors'] = flagcolor
# Explicitly set a line width if we're not given one, otherwise
# polygons are not outlined and we get no barbs
if 'linewidth' not in kw and 'lw' not in kw:
kw['linewidth'] = 1
# Parse out the data arrays from the various configurations supported
x, y, u, v, c = _parse_args(*args)
self.x = x
self.y = y
xy = np.hstack((x[:, np.newaxis], y[:, np.newaxis]))
# Make a collection
barb_size = self._length ** 2 / 4 # Empirically determined
mcollections.PolyCollection.__init__(self, [], (barb_size,),
offsets=xy,
transOffset=transform, **kw)
self.set_transform(transforms.IdentityTransform())
self.set_UVC(u, v, c)
def _find_tails(self, mag, rounding=True, half=5, full=10, flag=50):
'''
Find how many of each of the tail pieces is necessary. Flag
specifies the increment for a flag, barb for a full barb, and half for
half a barb. Mag should be the magnitude of a vector (i.e., >= 0).
This returns a tuple of:
(*number of flags*, *number of barbs*, *half_flag*, *empty_flag*)
*half_flag* is a boolean whether half of a barb is needed,
since there should only ever be one half on a given
barb. *empty_flag* flag is an array of flags to easily tell if
a barb is empty (too low to plot any barbs/flags.
'''
# If rounding, round to the nearest multiple of half, the smallest
# increment
if rounding:
mag = half * (mag / half + 0.5).astype(np.int)
num_flags = np.floor(mag / flag).astype(np.int)
mag = np.mod(mag, flag)
num_barb = np.floor(mag / full).astype(np.int)
mag = np.mod(mag, full)
half_flag = mag >= half
empty_flag = ~(half_flag | (num_flags > 0) | (num_barb > 0))
return num_flags, num_barb, half_flag, empty_flag
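    # Editor's illustration (not part of the original module): with the
    # default increments half=5, full=10, flag=50, the nominal magnitude 65
    # from the _barbs_doc example decomposes as 50 + 10 + 5:
    #
    #   self._find_tails(np.array([65.0]))
    #   # -> (array([1]), array([1]), array([ True]), array([False]))
    #   #    i.e. one flag, one full barb, a half barb, and not empty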
def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
pivot, sizes, fill_empty, flip):
'''
This function actually creates the wind barbs. *u* and *v*
are components of the vector in the *x* and *y* directions,
respectively.
        *nflags*, *nbarbs*, *half_barb*, and *empty_flag* are,
        respectively, the number of flags, number of barbs, flag for
        half a barb, and flag for empty barb, ostensibly obtained
        from :meth:`_find_tails`.
*length* is the length of the barb staff in points.
*pivot* specifies the point on the barb around which the
entire barb should be rotated. Right now, valid options are
        'tip' and 'middle'.
*sizes* is a dictionary of coefficients specifying the ratio
of a given feature to the length of the barb. These features
include:
- *spacing*: space between features (flags, full/half
barbs)
            - *height*: distance from shaft to top of a flag or full
barb
- *width* - width of a flag, twice the width of a full barb
- *emptybarb* - radius of the circle used for low
magnitudes
*fill_empty* specifies whether the circle representing an
empty barb should be filled or not (this changes the drawing
of the polygon).
*flip* is a flag indicating whether the features should be flipped to
the other side of the barb (useful for winds in the southern
        hemisphere).
This function returns list of arrays of vertices, defining a polygon
for each of the wind barbs. These polygons have been rotated to
properly align with the vector direction.
'''
# These control the spacing and size of barb elements relative to the
# length of the shaft
spacing = length * sizes.get('spacing', 0.125)
full_height = length * sizes.get('height', 0.4)
full_width = length * sizes.get('width', 0.25)
empty_rad = length * sizes.get('emptybarb', 0.15)
# Controls y point where to pivot the barb.
pivot_points = dict(tip=0.0, middle=-length / 2.)
# Check for flip
if flip:
full_height = -full_height
endx = 0.0
endy = pivot_points[pivot.lower()]
# Get the appropriate angle for the vector components. The offset is
# due to the way the barb is initially drawn, going down the y-axis.
        # This makes sense in a meteorological mode of thinking, where 0
        # degrees corresponds to north (traditionally the y-axis).
angles = -(ma.arctan2(v, u) + np.pi / 2)
# Used for low magnitude. We just get the vertices, so if we make it
# out here, it can be reused. The center set here should put the
# center of the circle at the location(offset), rather than at the
# same point as the barb pivot; this seems more sensible.
circ = CirclePolygon((0, 0), radius=empty_rad).get_verts()
if fill_empty:
empty_barb = circ
else:
# If we don't want the empty one filled, we make a degenerate
# polygon that wraps back over itself
empty_barb = np.concatenate((circ, circ[::-1]))
barb_list = []
for index, angle in np.ndenumerate(angles):
# If the vector magnitude is too weak to draw anything, plot an
# empty circle instead
if empty_flag[index]:
# We can skip the transform since the circle has no preferred
# orientation
barb_list.append(empty_barb)
continue
poly_verts = [(endx, endy)]
offset = length
# Add vertices for each flag
for i in range(nflags[index]):
                # The spacing that works for the barbs is a little too much for
# the flags, but this only occurs when we have more than 1
# flag.
if offset != length:
offset += spacing / 2.
poly_verts.extend(
[[endx, endy + offset],
[endx + full_height, endy - full_width / 2 + offset],
[endx, endy - full_width + offset]])
offset -= full_width + spacing
# Add vertices for each barb. These really are lines, but works
# great adding 3 vertices that basically pull the polygon out and
# back down the line
for i in range(nbarbs[index]):
poly_verts.extend(
[(endx, endy + offset),
(endx + full_height, endy + offset + full_width / 2),
(endx, endy + offset)])
offset -= spacing
# Add the vertices for half a barb, if needed
if half_barb[index]:
# If the half barb is the first on the staff, traditionally it
# is offset from the end to make it easy to distinguish from a
# barb with a full one
if offset == length:
poly_verts.append((endx, endy + offset))
offset -= 1.5 * spacing
poly_verts.extend(
[(endx, endy + offset),
(endx + full_height / 2, endy + offset + full_width / 4),
(endx, endy + offset)])
            # Rotate the barb according to the angle. Making the barb first and
# then rotating it made the math for drawing the barb really easy.
# Also, the transform framework makes doing the rotation simple.
poly_verts = transforms.Affine2D().rotate(-angle).transform(
poly_verts)
barb_list.append(poly_verts)
return barb_list
def set_UVC(self, U, V, C=None):
self.u = ma.masked_invalid(U, copy=False).ravel()
self.v = ma.masked_invalid(V, copy=False).ravel()
if C is not None:
c = ma.masked_invalid(C, copy=False).ravel()
x, y, u, v, c = delete_masked_points(self.x.ravel(),
self.y.ravel(),
self.u, self.v, c)
_check_consistent_shapes(x, y, u, v, c)
else:
x, y, u, v = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v)
_check_consistent_shapes(x, y, u, v)
magnitude = np.hypot(u, v)
flags, barbs, halves, empty = self._find_tails(magnitude,
self.rounding,
**self.barb_increments)
# Get the vertices for each of the barbs
plot_barbs = self._make_barbs(u, v, flags, barbs, halves, empty,
self._length, self._pivot, self.sizes,
self.fill_empty, self.flip)
self.set_verts(plot_barbs)
# Set the color array
if C is not None:
self.set_array(c)
# Update the offsets in case the masked data changed
xy = np.hstack((x[:, np.newaxis], y[:, np.newaxis]))
self._offsets = xy
self.stale = True
def set_offsets(self, xy):
"""
        Set the offsets for the barb polygons.  This saves the offsets
        passed in and masks them as appropriate for the existing U/V data.
        *offsets* should be a sequence.
ACCEPTS: sequence of pairs of floats
"""
self.x = xy[:, 0]
self.y = xy[:, 1]
x, y, u, v = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v)
_check_consistent_shapes(x, y, u, v)
xy = np.hstack((x[:, np.newaxis], y[:, np.newaxis]))
mcollections.PolyCollection.set_offsets(self, xy)
self.stale = True
set_offsets.__doc__ = mcollections.PolyCollection.set_offsets.__doc__
barbs_doc = _barbs_doc
| gpl-3.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/core/computation/engines.py | 7 | 3745 | """
Engine classes for :func:`~pandas.eval`
"""
import abc
from pandas import compat
from pandas.compat import map
import pandas.io.formats.printing as printing
from pandas.core.computation.align import _align, _reconstruct_object
from pandas.core.computation.ops import (
UndefinedVariableError,
_mathops, _reductions)
_ne_builtins = frozenset(_mathops + _reductions)
class NumExprClobberingError(NameError):
pass
def _check_ne_builtin_clash(expr):
"""Attempt to prevent foot-shooting in a helpful way.
Parameters
----------
    expr : Expr
        The expression whose names are checked for overlap with the
        numexpr builtins.
"""
names = expr.names
overlap = names & _ne_builtins
if overlap:
s = ', '.join(map(repr, overlap))
raise NumExprClobberingError('Variables in expression "%s" '
'overlap with builtins: (%s)' % (expr, s))
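# Editor's illustration (not part of the original module): ``sin`` is one of
# the _mathops names frozen into _ne_builtins, so shadowing it in the calling
# scope trips the check above.  A minimal sketch, assuming numexpr installed:
#
#   import pandas as pd
#   sin = 1
#   pd.eval('sin + 1', engine='numexpr')   # raises NumExprClobberingError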
class AbstractEngine(object):
"""Object serving as a base class for all engines."""
__metaclass__ = abc.ABCMeta
has_neg_frac = False
def __init__(self, expr):
self.expr = expr
self.aligned_axes = None
self.result_type = None
def convert(self):
"""Convert an expression for evaluation.
Defaults to return the expression as a string.
"""
return printing.pprint_thing(self.expr)
def evaluate(self):
"""Run the engine on the expression
This method performs alignment which is necessary no matter what engine
is being used, thus its implementation is in the base class.
Returns
-------
obj : object
The result of the passed expression.
"""
if not self._is_aligned:
self.result_type, self.aligned_axes = _align(self.expr.terms)
# make sure no names in resolvers and locals/globals clash
res = self._evaluate()
return _reconstruct_object(self.result_type, res, self.aligned_axes,
self.expr.terms.return_type)
@property
def _is_aligned(self):
return self.aligned_axes is not None and self.result_type is not None
@abc.abstractmethod
def _evaluate(self):
"""Return an evaluated expression.
        The expression is evaluated in the local and global environment
        captured by ``self.expr.env``; the method itself takes no
        arguments.
Notes
-----
Must be implemented by subclasses.
"""
pass
class NumExprEngine(AbstractEngine):
"""NumExpr engine class"""
has_neg_frac = True
def __init__(self, expr):
super(NumExprEngine, self).__init__(expr)
def convert(self):
return str(super(NumExprEngine, self).convert())
def _evaluate(self):
import numexpr as ne
# convert the expression to a valid numexpr expression
s = self.convert()
try:
env = self.expr.env
scope = env.full_scope
truediv = scope['truediv']
_check_ne_builtin_clash(self.expr)
return ne.evaluate(s, local_dict=scope, truediv=truediv)
except KeyError as e:
# python 3 compat kludge
try:
msg = e.message
except AttributeError:
msg = compat.text_type(e)
raise UndefinedVariableError(msg)
class PythonEngine(AbstractEngine):
"""Evaluate an expression in Python space.
Mostly for testing purposes.
"""
has_neg_frac = False
def __init__(self, expr):
super(PythonEngine, self).__init__(expr)
def evaluate(self):
return self.expr()
def _evaluate(self):
pass
_engines = {'numexpr': NumExprEngine, 'python': PythonEngine}
| mit |
saketkc/statsmodels | statsmodels/sandbox/nonparametric/kde2.py | 34 | 3158 | # -*- coding: utf-8 -*-
from __future__ import print_function
from statsmodels.compat.python import lzip, zip
import numpy as np
from . import kernels
#TODO: should this be a function?
class KDE(object):
"""
Kernel Density Estimator
Parameters
----------
x : array-like
N-dimensional array from which the density is to be estimated
    kernel : kernel instance, optional
        Should be a kernel instance from
        statsmodels.sandbox.nonparametric.kernels (default: Gaussian).
"""
#TODO: amend docs for Nd case?
def __init__(self, x, kernel=None):
x = np.asarray(x)
if x.ndim == 1:
x = x[:,None]
nobs, n_series = x.shape
if kernel is None:
kernel = kernels.Gaussian() # no meaningful bandwidth yet
if n_series > 1:
if isinstance( kernel, kernels.CustomKernel ):
kernel = kernels.NdKernel(n_series, kernels = kernel)
self.kernel = kernel
self.n = n_series #TODO change attribute
self.x = x
def density(self, x):
return self.kernel.density(self.x, x)
def __call__(self, x, h="scott"):
return np.array([self.density(xx) for xx in x])
def evaluate(self, x, h="silverman"):
density = self.kernel.density
return np.array([density(xx) for xx in x])
if __name__ == "__main__":
from numpy import random
import matplotlib.pyplot as plt
import statsmodels.nonparametric.bandwidths as bw
from statsmodels.sandbox.nonparametric.testdata import kdetest
# 1-D case
random.seed(142)
x = random.standard_t(4.2, size = 50)
h = bw.bw_silverman(x)
#NOTE: try to do it with convolution
support = np.linspace(-10,10,512)
kern = kernels.Gaussian(h = h)
kde = KDE( x, kern)
print(kde.density(1.015469))
print(0.2034675)
Xs = np.arange(-10,10,0.1)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(Xs, kde(Xs), "-")
ax.set_ylim(-10, 10)
ax.set_ylim(0,0.4)
# 2-D case
x = lzip(kdetest.faithfulData["eruptions"], kdetest.faithfulData["waiting"])
x = np.array(x)
x = (x - x.mean(0))/x.std(0)
nobs = x.shape[0]
H = kdetest.Hpi
kern = kernels.NdKernel( 2 )
kde = KDE( x, kern )
print(kde.density( np.matrix( [1,2 ]))) #.T
plt.figure()
plt.plot(x[:,0], x[:,1], 'o')
n_grid = 50
xsp = np.linspace(x.min(0)[0], x.max(0)[0], n_grid)
ysp = np.linspace(x.min(0)[1], x.max(0)[1], n_grid)
# xsorted = np.sort(x)
# xlow = xsorted[nobs/4]
# xupp = xsorted[3*nobs/4]
# xsp = np.linspace(xlow[0], xupp[0], n_grid)
# ysp = np.linspace(xlow[1], xupp[1], n_grid)
xr, yr = np.meshgrid(xsp, ysp)
kde_vals = np.array([kde.density( np.matrix( [xi, yi ]) ) for xi, yi in
zip(xr.ravel(), yr.ravel())])
plt.contour(xsp, ysp, kde_vals.reshape(n_grid, n_grid))
plt.show()
# 5 D case
# random.seed(142)
# mu = [1.0, 4.0, 3.5, -2.4, 0.0]
# sigma = np.matrix(
# [[ 0.6 - 0.1*abs(i-j) if i != j else 1.0 for j in xrange(5)] for i in xrange(5)])
# x = random.multivariate_normal(mu, sigma, size = 100)
# kern = kernel.Gaussian()
# kde = KernelEstimate( x, kern )
| bsd-3-clause |
tody411/InverseToon | inversetoon/core/intersect.py | 1 | 3227 | # -*- coding: utf-8 -*-
## @package inversetoon.core.intersect
#
# Polyline intersection via internal subdivision.
# @author tody
# @date 2015/08/12
import numpy as np
from scipy.interpolate import UnivariateSpline
import matplotlib.pyplot as plt
from inversetoon.plot.window import showMaximize
def boundingBox(ps):
x_min = np.min(ps[:, 0])
x_max = np.max(ps[:, 0])
y_min = np.min(ps[:, 1])
y_max = np.max(ps[:, 1])
return (x_min, x_max, y_min, y_max)
def boundingBoxIntersect(bb1, bb2):
return bb1[1] > bb2[0] and bb1[0] < bb2[1] and bb1[3] > bb2[2] and bb1[2] < bb2[3]
def lineEquation(p1, p2):
peq1 = np.array([p1[0],p1[1],1])
peq2 = np.array([p2[0],p2[1],1])
return np.cross(peq1, peq2)
def lineIntersect(l1, l2):
leq1 = lineEquation(l1[0], l1[1])
leq2 = lineEquation(l2[0], l2[1])
ipeq = np.cross(leq1, leq2)
if np.abs(ipeq[2]) > 0.0001:
ipeq *= 1.0 / ipeq[2]
ip = np.array([ipeq[0], ipeq[1]])
return ip
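# Editor's note (illustrative addition, not part of the original module):
# the two helpers above work in homogeneous coordinates -- the line through
# p1 and p2 is the cross product of their (x, y, 1) vectors, and two lines
# meet at the cross product of their equations.  A minimal sanity check:
def _lineIntersectExample():
    # The x-axis meets the vertical line x = 1 at the point (1, 0).
    ip = lineIntersect([(0.0, 0.0), (1.0, 0.0)],
                       [(1.0, 0.0), (1.0, 1.0)])
    assert np.allclose(ip, [1.0, 0.0])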
def polylineIntersect(ps1, ps2):
num_segment_points = 5
ps1_list = [ps1[i: i + num_segment_points+1] for i in range(0, len(ps1), num_segment_points)]
ps2_list = [ps2[i: i + num_segment_points+1] for i in range(0, len(ps2), num_segment_points)]
    intersect_points, bb1_list, bb2_list = polylineIntersectIter(ps1_list, ps2_list)
return intersect_points, bb1_list, bb2_list
def polylineIntersectIter(ps1_list, ps2_list):
bb1_list = [boundingBox(ps1) for ps1 in ps1_list]
bb2_list = [boundingBox(ps2) for ps2 in ps2_list]
intersect_points = []
for ps1, bb1 in zip(ps1_list, bb1_list):
for ps2, bb2 in zip(ps2_list, bb2_list):
if boundingBoxIntersect(bb1, bb2):
ip = lineIntersect([ps1[0], ps1[-1]], [ps2[0], ps2[-1]])
intersect_points.append(ip)
continue
return np.array(intersect_points), bb1_list, bb2_list
def splinePoints(cvs, num_points=100):
spl = UnivariateSpline(cvs[:, 0], cvs[:, 1])
bb = boundingBox(cvs)
x_new = np.linspace(bb[0], bb[1], num_points)
y_new = spl(x_new)
ps = np.zeros((num_points, 2))
ps[:, 0] = x_new
ps[:, 1] = y_new
return ps
def plotBoundingBox(bb):
bb_points = [(bb[0],bb[2]), (bb[0], bb[3]), (bb[1], bb[3]), (bb[1], bb[2]), (bb[0], bb[2])]
bb_points = np.array(bb_points)
plt.plot(bb_points[:, 0], bb_points[:, 1], "-")
def testIntersection():
#cv1 = np.array([(0.0, 0.0), (1.0, 1.0), (2.0, 1.0), (3.0, 0.0)])
#cv2 = np.array([(0.0, 1.0), (1.0, 0.0), (2.0, 2.0), (3.0, 3.0)])
    cv1 = np.random.rand(4, 2)
    cv2 = np.random.rand(4, 2)
    # UnivariateSpline requires increasing x values, so sort the random
    # control points by their x coordinate before fitting.
    cv1 = cv1[np.argsort(cv1[:, 0])]
    cv2 = cv2[np.argsort(cv2[:, 0])]
ps1 = splinePoints(cv1)
ps2 = splinePoints(cv2)
plt.plot(ps1[:, 0], ps1[:, 1], "-")
plt.plot(ps2[:, 0], ps2[:, 1], "-")
intersect_points, bb1_list, bb2_list = polylineIntersect(ps1, ps2)
for bb1, bb2 in zip(bb1_list, bb2_list):
plotBoundingBox(bb1)
plotBoundingBox(bb2)
    print(intersect_points)
    print("num_intersects: %s" % len(intersect_points))
if len(intersect_points) > 0:
plt.plot(intersect_points[:, 0], intersect_points[:, 1], "o")
showMaximize()
if __name__ == '__main__':
testIntersection() | mit |
mglerner/IntroToBiophysics | BiologicalPatternFormation/WorkingReactionDiffusion.py | 1 | 3968 | #!/usr/bin/env python
# coding: utf-8
# > This is one of the 100 recipes of the [IPython Cookbook](http://ipython-books.github.io/), the definitive guide to high-performance scientific computing and data science in Python.
#
# Links:
#
# * http://mrob.com/pub/comp/xmorphia/F260/F260-k550.html
# * http://mrob.com/pub/comp/xmorphia/
# # 12.4. Simulating a Partial Differential Equation: reaction-diffusion systems and Turing patterns
# 1. Let's import the packages.
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
Du = 0.00016
Dv = 0.00008
F= 0.0350
k= 0.065
colormap = plt.cm.copper
colormap = plt.cm.jet
size = 100 # size of the 2D grid
dx = 2./size # space step
T = 100.0 # total time
dt = .9 * dx**2/2 # time step
n = int(T/dt)
steps_per_frame = 1000
print "We will show {n} steps, {s} steps per frame".format(n=n,s=steps_per_frame)
n = int(n/steps_per_frame)
# Make life nicer for setting up our initial conditions
def makeblock(data,xcenter,xhalflength,ycenter,yhalflength,value=1):
data[xcenter-xhalflength:xcenter+xhalflength,ycenter-yhalflength:ycenter+yhalflength] = value
if size == 100:
U = np.zeros((size,size))
V = np.zeros((size,size))
if 0:
makeblock(U,15,5,15,5)
makeblock(V,16,5,16,5)
makeblock(U,15,5,30,5)
makeblock(V,16,5,32,5)
elif 0:
makeblock(U,15,5,15,5)
makeblock(U,15,5,30,5)
elif 0:
U = np.random.rand(size, size)
V = np.random.rand(size, size)
elif 0:
makeblock(U,15,5,15,5,0.2)
makeblock(V,16,5,16,5,0.1)
makeblock(V,15,5,15,5)
makeblock(V,15,5,30,5)
else:
for i in range(20):
x,y = np.random.randint(0,size,2)
makeblock(U,x,5,y,5)
makeblock(V,x,5,y,5)
else:
U = np.random.rand(size, size)
V = np.random.rand(size, size)
def laplacian(Z):
Ztop = Z[0:-2,1:-1]
Zleft = Z[1:-1,0:-2]
Zbottom = Z[2:,1:-1]
Zright = Z[1:-1,2:]
Zcenter = Z[1:-1,1:-1]
return (Ztop + Zleft + Zbottom + Zright - 4 * Zcenter) / dx**2
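# Editor's note (illustrative addition, not part of the original script):
# laplacian() is the standard five-point finite-difference stencil on the
# interior of the grid,
#
#   lap Z[i, j] ~ (Z[i-1, j] + Z[i, j-1] + Z[i+1, j] + Z[i, j+1] - 4 Z[i, j]) / dx**2
#
# e.g. for Z = x**2 (constant along the other axis) it returns exactly 2:
#
#   Zq = ((np.arange(size) * dx) ** 2)[:, None] * np.ones((1, size))
#   np.allclose(laplacian(Zq), 2.0)   # -> True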
fig = plt.figure()
ax1 = fig.add_subplot(1,2,1)
im1 = plt.imshow(U,cmap=colormap,interpolation='none',vmin=0,vmax=1)
ax2 = fig.add_subplot(1,2,2)
im2 = plt.imshow(V,cmap=colormap,interpolation='none',vmin=0,vmax=1)
#ax = plt.axes(xlim=(0,size),ylim=(0,size))
bbox_props = dict(boxstyle="Round,pad=0.3", fc="cyan", ec="b", lw=2)
description_text = ax1.text(size-40, size-10, "Direction", ha="center", va="center",# rotation=45,
size=15,
bbox=bbox_props)
#description_text = ax.text(5,5,'cows')
def init():
#plt.imshow(U,cmap=colormap,interpolation='none')
description_text.set_text('frogs')
cbar_ax = fig.add_axes([0.90, 0.15, 0.025, 0.7])
fig.colorbar(im1, cax=cbar_ax)
return [im1,im2,description_text]
# In[79]:
# We simulate the PDE with the finite difference method.
def animate(i):
# We compute the Laplacian of u and v.
for ii in range(steps_per_frame):
deltaU = laplacian(U)
deltaV = laplacian(V)
# We take the values of u and v inside the grid.
Uc = U[1:-1,1:-1]
Vc = V[1:-1,1:-1]
# We update the variables.
U[1:-1,1:-1] = Uc + dt * (Du * deltaU - Uc*Vc*Vc + F*(1-Uc))
V[1:-1,1:-1] = Vc + dt * (Dv * deltaV + Uc*Vc*Vc - (F+k)*Vc)
# Neumann conditions: derivatives at the edges
# are null.
for Z in (U, V):
Z[0,:] = Z[1,:]
Z[-1,:] = Z[-2,:]
Z[:,0] = Z[:,1]
Z[:,-1] = Z[:,-2]
im1.set_array(U)
im2.set_array(V)
description_text.set_text('F {F} k {k} i {i}'.format(F=F,k=k,i=i))
return [im1,im2,description_text]
anim = animation.FuncAnimation(fig, animate, init_func=init, repeat=False,
frames=n, interval=20,
#blit=True
)
plt.show()
| apache-2.0 |
bh107/benchpress | benchpress/benchmarks/util.py | 1 | 13871 | from __future__ import print_function
import argparse
import time
import sys
import gzip
import operator
import functools
# In order to support runs without bohrium installed, we need some import hacks. The result is:
# * `np` will point to either Bohrium or Numpy
# * `numpy` will point to Numpy
# * `bohrium` will point to either Bohrium or None
import numpy as np
try:
import numpy_force as numpy
bh_is_loaded_as_np = True
except ImportError:
import numpy as numpy
bh_is_loaded_as_np = False
try:
import bohrium
except ImportError:
bohrium = None
class VisualArgs:
def __init__(self, args):
self.count = -1
self.rate = args.visualize_rate
self.param = args.visualize_param
self.trace = {'org': [], 'zip': []}
self.trace_fname = args.visualize_trace # When None, no tracing
self.dry = args.visualize_dry
class Benchmark:
"""
Helper class to aid running Python/NumPy programs with and without Bohrium.
Use it to sample elapsed time using: start()/stop()
Pretty-prints results using pprint().
start()/stop() will send flush signals to npbackend, ensuring that only
the statements in-between start() and stop() are measured.
"""
def __init__(self, description, size_pattern, delimiter="*"):
self._elapsed = 0.0 # The quantity measured
self._script = sys.argv[0] # The script being run
self.delimiter = delimiter
# Construct argument parser
p = argparse.ArgumentParser(description=description)
p.add_argument('size',
metavar=size_pattern,
help="Tell the script the size of the data to work on."
)
p.add_argument('--dtype',
choices=["uint8", "float32", "float64"],
default="float64",
help="Tell the the script which primitive type to use."
" (default: %(default)s)"
)
p.add_argument('--seed',
default=42,
help="The seed to use when using random data."
)
p.add_argument('--inputfn',
default=None,
help="Input file to use as data.",
metavar="FILE",
type=str,
)
p.add_argument('--outputfn',
default=None,
help="Output file to store results in (.npz extension will "
"be appended to the file name if it is not already there).",
metavar="FILE",
type=str,
)
p.add_argument('--no-extmethods',
default=False,
action='store_true',
help="Disable extension methods."
)
p.add_argument('--visualize',
default=False,
action='store_true',
help="Enable visualization in script."
)
p.add_argument('--visualize-rate',
default=1,
type=int,
help="The rate of visualization (Default: 1, which means all frame)"
)
p.add_argument('--visualize-param',
default=None,
help="Set visualization parameters."
)
p.add_argument('--visualize-trace',
default=None,
type=str,
help="Dump frames to files instead of showing them"
)
p.add_argument('--visualize-dry',
default=False,
action='store_true',
help="Do the data process but don't show any visualization"
)
p.add_argument('--verbose',
default=False,
action='store_true',
help="Print out misc. information from script."
)
p.add_argument('--no-flush',
action='store_true',
help="Disable calls to flush within benchmark iterations."
)
p.add_argument('--no-do_while',
action='store_true',
help="Disable Bohrium's optimized `do_while`."
)
self.args = p.parse_args() # Parse the arguments
self.args.size = [eval(i) for i in self.args.size.split(self.delimiter)] if self.args.size else []
self.dtype = eval("numpy.%s" % self.args.dtype)
if self.args.visualize:
self._visual_args = VisualArgs(self.args)
self.numpy_viz_handle = None # NumPy visualization handle
def flush(self, ignore_no_flush_arg=False):
"""Executes the queued instructions when running through Bohrium. Set `ignore_no_flush_arg=True` to flush
even when the --no-flush argument is used"""
if bh_is_loaded_as_np:
if ignore_no_flush_arg or not self.args.no_flush:
bohrium.flush()
def start(self):
"""Start the timer"""
self.flush()
self._elapsed = time.time()
def stop(self):
"""Stop the timer"""
self.flush()
self._elapsed = time.time() - self._elapsed
def save_data(self, data_dict):
"""Save `data_dict` as a npz archive when --outputfn is used"""
assert (isinstance(data_dict, dict))
if self.args.outputfn is not None:
# Clean `data_dict` for Bohrium arrays
nobh_data = {"_bhary_keys": []}
for k in data_dict.keys():
if hasattr(data_dict[k], "copy2numpy"):
nobh_data[k] = data_dict[k].copy2numpy()
nobh_data["_bhary_keys"].append(k)
else:
nobh_data[k] = data_dict[k]
numpy.savez_compressed(self.args.outputfn, **nobh_data)
def load_data(self):
"""Load the npz archive specified by --inputfn or None is not set"""
if self.args.inputfn is None:
return None
else:
nobh_data = numpy.load(self.args.inputfn)
bhary_keys = nobh_data["_bhary_keys"].tolist()
ret = {}
for k in nobh_data.keys():
if k == "_bhary_keys":
continue
# Convert numpy arrays into bohrium arrays
if bh_is_loaded_as_np and k in bhary_keys:
a = nobh_data[k]
ret[k] = bohrium.array(a, bohrium=True)
else:
ret[k] = nobh_data[k]
return ret
def pprint(self):
"""Print the elapsed time"""
print("%s - bohrium: %s, size: %s, elapsed-time: %f" % (
self._script,
bh_is_loaded_as_np,
'*'.join([str(s) for s in self.args.size]),
self._elapsed
))
self.confirm_exit()
def random_array(self, shape, dtype=None):
"""Return a random array of the given shape and dtype. If dtype is None, the type is determent by
the --dtype command line arguments"""
dtype = self.dtype if dtype is None else dtype
size = functools.reduce(operator.mul, shape)
if issubclass(numpy.dtype(dtype).type, numpy.integer):
if bohrium is not None:
            # If bohrium is installed, we always use the random123 in Bohrium even when running pure NumPy
ret = bohrium.random.randint(1, size=size, bohrium=bh_is_loaded_as_np)
else:
ret = numpy.random.randint(1, size=size)
else:
if bohrium is not None:
            # If bohrium is installed, we always use the random123 in Bohrium even when running pure NumPy
ret = bohrium.random.rand(*shape, bohrium=bh_is_loaded_as_np)
else:
ret = numpy.random.rand(*shape)
return np.array(ret, dtype=dtype)
def do_while(self, func, niters, *args, **kwargs):
"""Implements `bohrium.do_while()` for regular NumPy"""
if bh_is_loaded_as_np and not self.args.visualize and not self.args.no_do_while:
return bohrium.do_while(func, niters, *args, **kwargs)
i = 0
func.__globals__['get_iterator'] = lambda x=0: i + x
        def get_grid(*args):
            assert(len(args) > 0)
            grid = args[::-1]
            iterators = ()
            step_delay = 1  # stride accumulator; it was missing, causing an UnboundLocalError below
            for dim, iterations in enumerate(grid):
                it = int(i / step_delay) % iterations
                step_delay *= iterations
                iterators = (it,) + iterators
            return iterators
        func.__globals__['get_grid'] = lambda *args: get_grid(*args)  # forward every grid dimension, not a single tuple
if niters is None:
niters = sys.maxsize
while i < niters:
cond = func(*args, **kwargs)
if cond is not None and not cond:
break
i += 1
self.flush()
return i
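    # Illustrative `do_while` call pattern (a sketch; `body` and `ary` are made-up
    # names). The loop body returns True (or None) to continue and False to stop,
    # mirroring the `cond is not None and not cond` check above:
    #
    #   def body(ary):
    #       ary += 1
    #       return bool((ary < 100).all())
    #
    #   iterations_run = bench.do_while(body, 1000, ary)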
def dump_visualization_trace_file(self, field):
fname = "%s_%s.npy.gz" % (self._visual_args.trace_fname, field)
data = np.stack(self._visual_args.trace[field])
del self._visual_args.trace[field]
print("Writing visualization trace file: %s (%s)" % (fname, data.shape))
f = gzip.GzipFile("%s" % fname, "w")
np.save(f, data)
del data
f.close()
def __del__(self):
if hasattr(self, "args"): # If argparse fails, `args` dosn't exist
if self.args.visualize and self._visual_args.trace_fname is not None and bh_is_loaded_as_np:
self.dump_visualization_trace_file("org")
self.dump_visualization_trace_file("zip")
from bohrium import _bh
msg = _bh.message("statistics-detail")
with open("%s_stat.txt" % self._visual_args.trace_fname, "w") as f:
f.write(msg)
def confirm_exit(self, msg="Hit Enter to exit..."):
if self.args.visualize and self._visual_args.trace_fname is None and not self._visual_args.dry:
if sys.version_info[0] == 2:
raw_input(msg)
else:
input(msg)
def plot_surface(self, ary, mode="2d", colormap=0, lowerbound=-200, upperbound=200):
"""Plot the surface `ary` when the --visualize argument is used. """
def surface2d():
if not self.numpy_viz_handle:
import matplotlib.pyplot as plt
plt.figure()
img = plt.imshow(ary, interpolation="nearest", cmap=plt.cm.gray)
plt.show(False)
self.numpy_viz_handle = {
"plt": plt,
"img": img
}
else:
plt = self.numpy_viz_handle["plt"]
img = self.numpy_viz_handle["img"]
plt.ion()
img.set_data(ary)
plt.draw()
def surface3d():
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from mpl_toolkits.mplot3d import axes3d, Axes3D # We need this import for projection='3d' to work
if self.numpy_viz_handle is None:
self.numpy_viz_handle = {
"fig": plt.figure()
}
plt.show(False)
fig = self.numpy_viz_handle["fig"]
ax = fig.gca(projection='3d')
H, W = ary.shape
X = np.arange(0, W, 1)
Y = np.arange(0, H, 1)
X, Y = np.meshgrid(X, Y)
surf = ax.plot_surface(
X, Y, ary, rstride=1, cstride=1, cmap='winter',
linewidth=0, antialiased=False
)
if "surf" in self.numpy_viz_handle:
self.numpy_viz_handle["surf"].remove()
self.numpy_viz_handle["surf"] = surf
ax.set_zlim(0, 10)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
plt.ion()
plt.draw()
if self.args.visualize:
if bh_is_loaded_as_np:
from bohrium import visualization
self._visual_args.count += 1
if not (self._visual_args.count % self._visual_args.rate == 0):
return
if self._visual_args.dry: # We force the visualization process on a dry run
visualization.compressed_copy(ary, param=self._visual_args.param).copy2numpy()
else:
if self._visual_args.trace_fname is None: # We don't show visualization when tracing
visualization.plot_surface(ary, mode, colormap, lowerbound, upperbound, self._visual_args.param)
else:
org = ary.copy2numpy()
compressed = visualization.compressed_copy(ary, param=self._visual_args.param).copy2numpy()
self._visual_args.trace['org'].append(org)
self._visual_args.trace['zip'].append(compressed)
print("plot_surface %s: %s" % (self._visual_args.count, len(self._visual_args.trace['org'])),
file=sys.stderr)
else:
if mode.lower() == "2d":
surface2d()
elif mode.lower() == "3d":
surface3d()
else:
raise Exception("Invalid mode.")
| apache-2.0 |
donkirkby/live-py-plugin | docs/lessons/lesson08_annotate.py | 1 | 2242 | """
Let's annotate some interesting points using the annotate
command. We chose the 2pi/3 value and we want to annotate both
the sine and the cosine. We'll first draw a marker on the curve
as well as a straight dotted line. Then, we'll use the annotate
command to display some text with an arrow.
If your dashed lines don't quite match the goal, try drawing
them in the opposite direction.
:lesson goal file: goal08.py
"""
# Imports
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(-np.pi, np.pi, 256, endpoint=True)
c, s = np.cos(x), np.sin(x)
# Plot cosine using blue color with a continuous line of width 2 (pixels)
plt.plot(x, c, color="blue", linewidth=2.0, linestyle="-", label='cosine')
# Plot sine using red color with a continuous line of width 2 (pixels)
plt.plot(x, s, color="red", linewidth=2.0, linestyle="-", label='sine')
plt.legend(loc='upper left', frameon=False)
t = 2*np.pi/3
plt.plot([t, t/2], [-1, -0.2], color='blue', linewidth=1.5, linestyle="--")
plt.scatter([t/2], [-0.2], 50, color='blue')
plt.annotate(r'$\cos(\frac{2\pi}{3})=-\frac{1}{2}$',
xy=(t, np.cos(t)), xycoords='data',
xytext=(-90, -50), textcoords='offset points', fontsize=16,
arrowprops=dict(arrowstyle="-|>", connectionstyle="arc3,rad=.2"))
plt.plot([t, t], [0, np.sin(t)], color='red', linewidth=1.5, linestyle=":")
plt.scatter([t], [np.sin(t)], 50, color='red')
plt.annotate(r'$\sin(\frac{2\pi}{30})=\frac{\sqrt{3}}{2}$',
xy=(t, np.sin(t)), xycoords='data',
xytext=(+10, +30), textcoords='offset points', fontsize=16,
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))
# Set x limits
plt.xlim(x.min()*1.1, x.max()*1.1)
# Set x ticks
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi],
[r'$-\pi$', r'$-\pi/2$', r'$0$', r'$+\pi/2$', r'$+\pi$'])
# Set y limits
plt.ylim(c.min()*1.1, c.max()*1.1)
# Set y ticks
plt.yticks([-1, 0, 1])
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_position(('data', 0))
ax.xaxis.set_ticks_position('bottom')
ax.spines['left'].set_position(('data', 0))
ax.yaxis.set_ticks_position('left')
# Show result on screen
plt.show()
| mit |
SKIRT/PTS | core/plot/memory.py | 1 | 7241 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.plot.memory Contains the MemoryPlotter class, used for creating plots of the memory consumption
# of a SKIRT simulation as a function of time.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import numpy as np
import matplotlib.pyplot as plt
# Import the relevant PTS classes and modules
from ..basics.map import Map
from .plotter import Plotter
from ..basics.log import log
from ..tools import filesystem as fs
# -----------------------------------------------------------------
class MemoryPlotter(Plotter):
"""
    A plotter for the memory consumption of a SKIRT simulation as a function of time.
"""
def __init__(self):
"""
The constructor ...
:return:
"""
# Call the constructor of the base class
super(MemoryPlotter, self).__init__()
# -- Attributes --
# A data structure to store the memory (de)allocation information
self.allocation = None
# -----------------------------------------------------------------
@staticmethod
def default_input():
"""
This function ...
:return:
"""
return "memory.dat"
# -----------------------------------------------------------------
def prepare_data(self):
"""
        Prepare the input data into plottable format.
:return:
"""
# Inform the user
log.info("Preparing the input data into plottable format ...")
# Get the number of processes
ranks = np.unique(self.table["Process rank"])
assert len(ranks) == max(ranks) + 1
processes = len(ranks)
# Initialize the data structure to contain the memory usage information in plottable format
self.data = [Map({"times": [], "memory": []}) for i in range(processes)]
# Loop over the different entries in the memory table
for i in range(len(self.table)):
# Get the process rank
rank = self.table["Process rank"][i]
# Get the time and memory usage
time = self.table["Simulation time"][i]
memory = self.table["Memory usage"][i]
# Add the data point to the data structure
self.data[rank].times.append(time)
self.data[rank].memory.append(memory)
# Check whether (de)allocation information is present in the memory table
if "Array (de)allocation" in self.table.colnames:
# Initialize the data structure for plotting the memory usage of the root process and the memory
# allocation curve
self.allocation = Map({"times": [], "allocation": [], "cumulative": []})
# Get the mask covering entries that do not contain array (de)allocation information
mask = self.table["Array (de)allocation"].mask
# Check whether the first entry of the table corresponds to the root process
assert self.table["Process rank"][0] == 0
# Create a variable to store the cumulative sum of allocated memory
cumulative_sum = 0.0
# Loop over the different entries in the memory table
for i in range(len(self.table)):
# Get the process rank
rank = self.table["Process rank"][i]
# Only add the contributions from the root process
if rank > 0: break
# If the entry is masked because it does not contain memory allocation information, skip it
if mask[i]: continue
# Get the time and the amount of (de)allocated memory
time = self.table["Simulation time"][i]
allocation = self.table["Array (de)allocation"][i]
# Add the allocated memory to the sum
cumulative_sum += allocation
# Add the data point to the data structure
self.allocation.times.append(time)
self.allocation.allocation.append(allocation)
self.allocation.cumulative.append(cumulative_sum)
# -----------------------------------------------------------------
def plot(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Making the plots ...")
# Make a plot of the memory usage as a function of time
self.plot_memory()
# Make a plot of the memory (de)allocation information, if present
if self.allocation is not None: self.plot_allocation()
# -----------------------------------------------------------------
def plot_memory(self):
"""
        Plot the memory usage as a function of time for each process.
:return:
"""
# Determine the path to the plot file
plot_path = fs.join(self.output_path, "memory.pdf")
# Initialize figure
plt.figure()
plt.clf()
# Loop over the different processes
for rank in range(len(self.data)):
# Name of the current process
process = "P" + str(rank)
# Plot the memory usage
plt.plot(self.data[rank].times, self.data[rank].memory, label=process)
# Set the axis labels
plt.xlabel("Time (s)", fontsize='large')
plt.ylabel("Memory usage (GB)", fontsize='large')
# Set the plot title
plt.title("Memory consumption")
# Set the legend
if len(self.data) > 16: plt.legend(loc='upper center', ncol=8, bbox_to_anchor=(0.5, -0.1), prop={'size': 8})
else: plt.legend(loc='lower right', ncol=4, prop={'size': 8})
# Save the figure
plt.savefig(plot_path, bbox_inches='tight', pad_inches=0.25)
plt.close()
# -----------------------------------------------------------------
def plot_allocation(self):
"""
        Plot the memory (de)allocation curve of the root process against its total memory usage.
:return:
"""
# Determine the path to the plot file
plot_path = fs.join(self.output_path, "allocation.pdf")
# Initialize figure
plt.figure()
plt.clf()
# Plot the memory usage of the root process
plt.plot(self.data[0].times, self.data[0].memory, label="total memory usage")
# Plot the memory allocation of the root process
plt.step(self.allocation.times, self.allocation.cumulative, where="post", linestyle="--", label="allocated array memory")
# Set the axis labels
plt.xlabel("Time (s)", fontsize='large')
plt.ylabel("Memory usage (GB)", fontsize='large')
# Set the plot title
plt.title("Memory (de)allocation")
# Set the legend
plt.legend(loc='lower right', prop={'size': 8})
# Save the figure
plt.savefig(plot_path, bbox_inches='tight', pad_inches=0.25)
plt.close()
# -----------------------------------------------------------------
| agpl-3.0 |
Djabbz/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
jm-begon/scikit-learn | sklearn/feature_selection/__init__.py | 244 | 1088 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
| bsd-3-clause |
google/matched_markets | matched_markets/examples/data_simulator.py | 1 | 8550 | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Data generator for the linear model."""
from matched_markets.methodology import semantics
from matched_markets.methodology import utils
import numpy as np
import pandas as pd
from scipy import stats
class DataSimulator(object):
r"""Simulates geoexperiment datasets via a basic linear model.
For the model:
$y_{i,t} = \alpha_{sales} m_i + \beta \delta_{i,t} + m_i^\nu \epsilon_{i,t}$
$x_{i,t} = \alpha_{cost} m_i + \delta_{i,t} + m_i^\nu w_{i,t}$
$\delta_{i,t} = m_i I(i \in treat) I(t \in test)$
Where:
* $y$ represents response values
* $x$ represents input (cost) values.
* $N_{ctrl}, N_{treat}$ represent the number of geos in the control and
treatment groups respectively.
* Geo means $m = [1, \ldots, N_{ctrl}, 1, \ldots, N_{treat}]$.
* Heteroskedasticity parameter $\nu$, for example
$\nu=0.5$ for $var(y_i) \propto m_i$
* Causal cost in geo i at time t: $\delta_{i,t}$
* $\epsilon_{i,t} \sim N(0, \sigma^2_{resp})$ normal error
term for response.
* $w_{i,t} \sim N(0, \sigma^2_{cost})$ normal error term for cost.
Example:
```
# Experimental design.
n_control = 50
n_treat = 50
time_pre = 150
time_test = 75
# Linear params.
hetresp = 1.0
hetcost = 0.0
beta = 0.0
# Noise params.
hetsked = 0.0
sig_resp = 1.0
sig_cost = 1.0
# Column names.
df_keys = {
'key_response': 'sales',
'key_cost': 'cost',
'key_group': 'geo.group',
'key_period': 'period',
'key_geo': 'geo',
'key_date': 'date'
}
# Make simulator.
simulator = DataSimulator(n_control, n_treat,
time_pre, time_test,
hetresp, hetcost, beta,
hetsked, sig_resp, sig_cost,
**df_keys)
# Simulate data.
fake_data = simulator.sample()
```
"""
def __init__(self,
n_control, n_treat,
time_pre, time_test, # no cooldown as yet
hetresp, hetcost, beta,
hetsked, sig_resp, sig_cost,
noise_treat_only=False,
seed=None, **kwargs):
"""Creates a data simulator.
Args:
n_control: int. The number of control geos.
n_treat: int. The number of treatment geos.
time_pre: int. The number of pre-test period ticks.
time_test: int. The number of test period ticks.
hetresp: float. The degree of mean response variable heterogeneity.
hetcost: float. The degree of mean cost variable heterogeneity.
beta: float. The iROAS coefficient to be used.
hetsked: float. The degree of heteroskedasticity in cost and response.
sig_resp: float. The noise level in the response variable.
sig_cost: float. The noise level in the cost variable.
noise_treat_only: bool. Whether to add noise only in the treatment period.
seed: int. Sets the seed of the random number generator.
**kwargs: optional sematics for the produced data frame.
"""
# Constants.
self.n_control = n_control
self.n_treat = n_treat
self.time_pre = time_pre
self.time_test = time_test
self.time_total = time_pre + time_test
# Model parameters.
self.hetresp = hetresp
self.hetcost = hetcost
self.beta = beta
self.hetsked = hetsked
self.sig_resp = sig_resp
self.sig_cost = sig_cost
# Derived facts.
self.n_total = self.n_treat + self.n_control
self.col_len = self.n_total * self.time_total
# Extract any column / index name information supplied by the user.
user_df_names = utils.kwarg_subdict('key_', **kwargs)
self._df_names = semantics.DataFrameNameMapping(**user_df_names)
# Options
self.noise_treat_only = noise_treat_only
# Extract any semantics for control / treatment supplied by user.
user_group_semantics = utils.kwarg_subdict('group_', **kwargs)
self._groups = semantics.GroupSemantics(**user_group_semantics)
# Extract any semantics for experimental period supplied by user.
user_period_semantics = utils.kwarg_subdict('period_', **kwargs)
self._periods = semantics.PeriodSemantics(**user_period_semantics)
if seed is None:
seed = np.random.randint(0, 2**32)
self._rng = np.random.RandomState(seed=seed)
def make_period_base(self):
"""Returns a vector indicating test period entries for one geo."""
zeros_pre = np.zeros(self.time_pre)
ones_test = np.ones(self.time_test)
return np.hstack((zeros_pre, ones_test))
def make_geo_sizes(self):
"""Returns a column of geo 'sizes' for constructing heterogeneity."""
sizes_control = np.arange(1, self.n_control + 1)
sizes_treat = np.arange(1, self.n_treat + 1)
sizes = np.hstack((sizes_control, sizes_treat))
return np.kron(sizes, np.ones(self.time_total))
def make_geos(self):
"""Returns a column of geo labels."""
geo_names = np.arange(1, self.n_total + 1)
reps = np.ones(self.time_total)
return np.kron(geo_names, reps)
def make_periods(self):
"""Returns a column indicating experimental period of each entry."""
period_base = self.make_period_base()
return np.kron(np.ones(self.n_total), period_base)
def make_groups(self):
"""Returns a vector of ones at treatment group entries, zero in control."""
control = np.ones(self.n_control * self.time_total, dtype=int)
treatment = 2*np.ones(self.n_treat * self.time_total, dtype=int)
return np.hstack((control, treatment))
def make_cost_causal(self):
"""Returns a column representing the cost caused by the experiment."""
zeros_control = np.zeros(self.n_control)
range_treat = np.arange(1, self.n_treat + 1)
cost_base = np.hstack((zeros_control, range_treat))
period_base = self.make_period_base()
cost_causal = np.kron(cost_base, period_base)
return cost_causal
def make_test_mask(self):
"""Returns a column of ones in test period entries and zeros elsewhere."""
return np.kron(np.ones(self.n_total), self.make_period_base())
def make_noise(self, sig):
"""Returns a vector of additive noise with standard deviation sig."""
sig_multiplier = sig * np.power(self.make_geo_sizes(), self.hetsked)
white_noise = stats.norm.rvs(size=self.col_len, random_state=self._rng)
noise = sig_multiplier * white_noise
if self.noise_treat_only:
noise *= self.make_test_mask()
return noise
def make_cost(self):
"""Returns a sales column for the dataset."""
sizes = self.make_geo_sizes()
cost_default = self.hetcost * sizes
cost_causal = self.make_cost_causal()
return cost_default + cost_causal + self.make_noise(self.sig_cost)
def make_sales(self):
"""Returns a sales column for the dataset."""
sizes = self.make_geo_sizes()
means = self.hetresp * sizes
incr_cost = self.make_cost_causal()
return self.beta * incr_cost + means + self.make_noise(self.sig_resp)
def make_dates(self):
"""Returns an integer column representing dates for the dataset."""
return np.kron(np.ones(self.n_total), np.arange(self.time_total))
def sample(self):
"""Draw a sample dataset from the model.
Returns:
A `pd.DataFrame`.
"""
dates = self.make_dates()
groups = self.make_groups()
periods = self.make_periods()
geos = self.make_geos()
cost = self.make_cost()
sales = self.make_sales()
sizes = self.make_geo_sizes()
data = {
self._df_names.date: dates,
self._df_names.group: groups,
self._df_names.period: periods,
self._df_names.geo: geos,
self._df_names.response: sales,
self._df_names.cost: cost,
'size': sizes,
}
frame = pd.DataFrame(data, index=np.arange(self.col_len))
frame = frame.set_index(self._df_names.geo, append=False)
return frame
| apache-2.0 |
NextThought/pypy-numpy | doc/example.py | 81 | 3581 | """This is the docstring for the example.py module. Modules names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceeded by a blank line.
"""
from __future__ import division, absolute_import, print_function
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead or import individual functions as needed, e.g
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi'):
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
    long_var_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
    .. math:: X(e^{j\omega}) = \sum_{n=-\infty}^{\infty} x(n) e^{-j\omega n}
    And even use a greek symbol like :math:`\omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
"""
pass
| bsd-3-clause |
jrleeman/MetPy | examples/gridding/Wind_SLP_Interpolation.py | 8 | 4425 | # Copyright (c) 2016,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Wind and Sea Level Pressure Interpolation
=========================================
Interpolate sea level pressure, as well as wind component data,
to make a consistent looking analysis, featuring contours of pressure and wind barbs.
"""
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from matplotlib.colors import BoundaryNorm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from metpy.calc import wind_components
from metpy.cbook import get_test_data
from metpy.interpolate import interpolate_to_grid, remove_nan_observations
from metpy.plots import add_metpy_logo
from metpy.units import units
to_proj = ccrs.AlbersEqualArea(central_longitude=-97., central_latitude=38.)
###########################################
# Read in data
with get_test_data('station_data.txt') as f:
data = pd.read_csv(f, header=0, usecols=(2, 3, 4, 5, 18, 19),
names=['latitude', 'longitude', 'slp', 'temperature', 'wind_dir',
'wind_speed'],
na_values=-99999)
###########################################
# Project the lon/lat locations to our final projection
lon = data['longitude'].values
lat = data['latitude'].values
xp, yp, _ = to_proj.transform_points(ccrs.Geodetic(), lon, lat).T
###########################################
# Remove all missing data from pressure
x_masked, y_masked, pres = remove_nan_observations(xp, yp, data['slp'].values)
###########################################
# Interpolate pressure using Cressman interpolation
slpgridx, slpgridy, slp = interpolate_to_grid(x_masked, y_masked, pres, interp_type='cressman',
minimum_neighbors=1, search_radius=400000,
hres=100000)
##########################################
# Get wind information and mask where either speed or direction is unavailable
wind_speed = (data['wind_speed'].values * units('m/s')).to('knots')
wind_dir = data['wind_dir'].values * units.degree
good_indices = np.where((~np.isnan(wind_dir)) & (~np.isnan(wind_speed)))
x_masked = xp[good_indices]
y_masked = yp[good_indices]
wind_speed = wind_speed[good_indices]
wind_dir = wind_dir[good_indices]
###########################################
# Calculate u and v components of wind and then interpolate both.
#
# Both will have the same underlying grid so throw away grid returned from v interpolation.
u, v = wind_components(wind_speed, wind_dir)
windgridx, windgridy, uwind = interpolate_to_grid(x_masked, y_masked, np.array(u),
interp_type='cressman', search_radius=400000,
hres=100000)
_, _, vwind = interpolate_to_grid(x_masked, y_masked, np.array(v), interp_type='cressman',
search_radius=400000, hres=100000)
###########################################
# Get temperature information
x_masked, y_masked, t = remove_nan_observations(xp, yp, data['temperature'].values)
tempx, tempy, temp = interpolate_to_grid(x_masked, y_masked, t, interp_type='cressman',
minimum_neighbors=3, search_radius=400000, hres=35000)
temp = np.ma.masked_where(np.isnan(temp), temp)
###########################################
# Set up the map and plot the interpolated grids appropriately.
levels = list(range(-20, 20, 1))
cmap = plt.get_cmap('viridis')
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
fig = plt.figure(figsize=(20, 10))
add_metpy_logo(fig, 360, 120, size='large')
view = fig.add_subplot(1, 1, 1, projection=to_proj)
view.set_extent([-120, -70, 20, 50])
view.add_feature(cfeature.STATES.with_scale('50m'))
view.add_feature(cfeature.OCEAN)
view.add_feature(cfeature.COASTLINE.with_scale('50m'))
view.add_feature(cfeature.BORDERS, linestyle=':')
cs = view.contour(slpgridx, slpgridy, slp, colors='k', levels=list(range(990, 1034, 4)))
view.clabel(cs, inline=1, fontsize=12, fmt='%i')
mmb = view.pcolormesh(tempx, tempy, temp, cmap=cmap, norm=norm)
fig.colorbar(mmb, shrink=.4, pad=0.02, boundaries=levels)
view.barbs(windgridx, windgridy, uwind, vwind, alpha=.4, length=5)
view.set_title('Surface Temperature (shaded), SLP, and Wind.')
plt.show()
| bsd-3-clause |
printedheart/h2o-3 | h2o-py/tests/testdir_algos/glm/pyunit_link_functions_gaussianGLM.py | 5 | 1460 | import sys
sys.path.insert(1, "../../../")
import h2o, tests
import pandas as pd
import zipfile
import statsmodels.api as sm
def link_functions_gaussian():
print("Read in prostate data.")
h2o_data = h2o.import_file(path=h2o.locate("smalldata/prostate/prostate_complete.csv.zip"))
h2o_data.head()
sm_data = pd.read_csv(zipfile.ZipFile(h2o.locate("smalldata/prostate/prostate_complete.csv.zip")).
open("prostate_complete.csv")).as_matrix()
sm_data_response = sm_data[:,9]
sm_data_features = sm_data[:,1:9]
print("Testing for family: GAUSSIAN")
print("Set variables for h2o.")
myY = "GLEASON"
myX = ["ID","AGE","RACE","CAPSULE","DCAPS","PSA","VOL","DPROS"]
print("Create models with canonical link: IDENTITY")
h2o_model = h2o.glm(x=h2o_data[myX], y=h2o_data[myY], family="gaussian", link="identity",alpha=[0.5], Lambda=[0])
sm_model = sm.GLM(endog=sm_data_response, exog=sm_data_features,
family=sm.families.Gaussian(sm.families.links.identity)).fit()
print("Compare model deviances for link function identity")
h2o_deviance = h2o_model.residual_deviance() / h2o_model.null_deviance()
sm_deviance = sm_model.deviance / sm_model.null_deviance
assert h2o_deviance - sm_deviance < 0.01, "expected h2o to have an equivalent or better deviance measures"
if __name__ == "__main__":
tests.run_test(sys.argv, link_functions_gaussian)
| apache-2.0 |
mikebenfield/scikit-learn | examples/applications/plot_stock_market.py | 76 | 8522 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The outputs of the 3 models are combined in a 2D graph where nodes
represent the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
try:
from matplotlib.finance import quotes_historical_yahoo_ochl
except ImportError:
# quotes_historical_yahoo_ochl was named quotes_historical_yahoo before matplotlib 1.4
from matplotlib.finance import quotes_historical_yahoo as quotes_historical_yahoo_ochl
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [quotes_historical_yahoo_ochl(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
# a sequence of (*line0*, *line1*, *line2*), where::
#     linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
Merinorus/adaisawesome | Homework/05 - Taming Text/HW05.py | 1 | 9971 |
# coding: utf-8
# # 05 - Taming Text
# In[1]:
from wordcloud import WordCloud
from nltk.corpus import stopwords
from nltk.sentiment import *
import pandas as pd
import numpy as np
import nltk
import time
import matplotlib.pyplot as plt
import seaborn as sns
import pycountry
get_ipython().magic('matplotlib inline')
# In[2]:
# import data
directory = 'hillary-clinton-emails/'
aliases = pd.read_csv(directory+'aliases.csv')
email_receivers = pd.read_csv(directory+'EmailReceivers.csv')
emails = pd.read_csv(directory+'Emails.csv')
persons = pd.read_csv(directory+'Persons.csv')
# ### Comparison between extracted body text and raw text
# In[3]:
i = 2
print(emails['ExtractedBodyText'][i], '\n\n END OF BODY TEXT \n\n', emails['RawText'][i])
# By reading a few emails we can see that the extracted body text is just the text that the email sender wrote (as stated on Kaggle), while the raw text gathers the previous emails forwarded or the whole discussion. Note that the extracted body text can sometimes contain NaNs. Since including the repeated messages of the raw text would bias the distribution of the words, we kept only the body text.
# ## 1. Wordclouds
# In[4]:
# raw corpus
text_corpus = emails.ExtractedBodyText.dropna().values
raw_text = ' '.join(text_corpus)
# generate wordcloud
wordcloud = WordCloud().generate(raw_text)
plt.figure(figsize=(15,10))
plt.imshow(wordcloud)
plt.axis('off');
# In[8]:
def preprocess(text, stemmer):
    print('Length of raw text: ', len(text))
    # tokenization (need to install models/punkt from nltk.download())
    tokens = nltk.word_tokenize(text, language='english')
print('Number of tokens extracted: ', len(tokens))
# stopwords removal (need to install stopwords corpus in corpora/stopwords)
    # cache stopwords to improve performance (70x speedup)
cached_stopwords = set(stopwords.words('english'))
filtered_tokens = [word for word in tokens if word not in cached_stopwords]
print('Number of tokens after stopword removal: ', len(filtered_tokens))
# stemming
if stemmer == 'snowball':
stemmer = nltk.SnowballStemmer('english')
elif stemmer == 'porter':
        stemmer = nltk.PorterStemmer()  # PorterStemmer takes no language argument
else:
print('choose appropriate stemmer')
stemmed_filtered_tokens = [stemmer.stem(t) for t in filtered_tokens]
# dump array in text file
output = ' '.join(stemmed_filtered_tokens)
with open("preprocessed_text.txt", "w") as text_file:
text_file.write(output)
preprocess(raw_text, 'snowball')
# In[9]:
preprocessed_text = open('preprocessed_text.txt').read()
wordcloud2 = WordCloud().generate(preprocessed_text)
plt.figure(figsize=(15,10))
plt.imshow(wordcloud2)
plt.axis('off');
# ## Comparison between the word clouds
#
# Looking at the wordcloud generated after having preprocessed the data, it seems that stemming hurt the "performance" of the wordcloud: a number of words have been incorrectly stemmed, e.g. department has been reduced to depart, secretary to secretari, message to messag and so on.
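# As a quick sanity check (an added illustration, not part of the original analysis), we can ask the Snowball stemmer directly how it truncates the words mentioned above:
# In[ ]:
checker = nltk.SnowballStemmer('english')
print([checker.stem(w) for w in ['department', 'secretary', 'message']])
# expected output (an assumption based on the Snowball rules): ['depart', 'secretari', 'messag']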
# ## 2. Sentiment analysis
# from [this link](https://www.kaggle.com/ampaho/d/kaggle/hillary-clinton-emails/foreign-policy-map-through-hrc-s-emails/code), we added the following words to be removed from the emails:
# * add "RE" because it is the ISO2 code for Reunion island, but it mostly appears in emails to indicate the RE(sponses) to previous emails.
# * FM
# * TV is ISO2 code for Tuvalu but also refers to Television
# * AL is a given name and also ISO2 for Albania
# * BEN is a given name and also ISO3 for Benin
# * LA is Los Angeles and ISO2 for Laos
# * AQ is an abbreviation of "As Quoted" and ISO2 for Antarctica
#
# After a few runs, we looked at the (unusual) countries extracted. For example, the country Saint Pierre and Miquelon is mentioned 631 times, which is not bad for such a small country. We noticed that a significant number of emails contain capitalized words that are misinterpreted as ISO2/ISO3 country codes. To cope with this, we added the following stop words (the short pycountry check after this list illustrates the collisions):
# * AND is ISO3 for Andorra
# * AM is ISO2 for Armenia
# * AT is ISO2 for Austria
# * IN is ISO2 for India
# * NO is ISO2 for Norway
# * PM is ISO2 for Saint Pierre and Miquelon
# * TO is ISO2 for Tonga
# * BY is ISO2 for Belarus
# * IE is ISO2 for Ireland (id est)
# * IT is ISO2 for Italy
# * MS is ISO2 for Montserrat
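# In[ ]:
# A short pycountry check (an illustration sketch; `demo_code` is a new name)
# confirming a few of the collisions listed above:
for demo_code in ['IT', 'NO', 'TO', 'PM']:
    print(demo_code, '->', pycountry.countries.get(alpha_2=demo_code).name)
# e.g. IT -> Italy, NO -> Norway, TO -> Tonga, PM -> Saint Pierre and Miquelon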
# In[11]:
def find_countries(tokens):
    # find country names in a list of tokens (matching ISO2, ISO3 or full name)
countries = []
for token in tokens:
try:
# search for any alpha_2 country name e.g. US, CH
country = pycountry.countries.get(alpha_2=token)
countries.append(country.name)
except KeyError:
try:
# search for any alpha_3 country name e.g. USA, CHE
country = pycountry.countries.get(alpha_3=token)
countries.append(country.name)
except KeyError:
try:
# search for a country by its name, title() upper cases every first letter but lower cases
# the other, hence it is handled last, but it deals with country written in lower case
country = pycountry.countries.get(name=token.title())
countries.append(country.name)
except KeyError: pass
return list(set(countries))
# In[23]:
def foreign_policy(emails, sentiment_analyzer):
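    # For each email: tokenize, drop the (extended) stop words, detect the
    # countries mentioned, score the email with VADER, and accumulate the
    # sentiment sum and mention count per country; sums are averaged at the end.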
start_time = time.time()
words_to_be_removed = ["RE", "FM", "TV", "LA", "AL", "BEN", "AQ", "AND", "AM", "AT", "IN", "NO", "PM", "TO",
"BY", "IE", "IT", "MS"]
vader_analyzer = SentimentIntensityAnalyzer()
foreign_policy = {}
cached_stopwords = set(stopwords.words('english'))
cached_stopwords.update(words_to_be_removed)
i=0
    for email in emails:  # TODO: use a regex instead of token lookup because this is too slow
#print('{:d} / {:d} emails processed'.format(i, len(emails)))
tokens = nltk.word_tokenize(email, language='english')
tokens = [word for word in tokens if word not in cached_stopwords]
# country lookup in tokens
countries = find_countries(tokens)
i +=1
if not countries: continue
if sentiment_analyzer =='vader':
sentiment = vader_analyzer.polarity_scores(email)
score = sentiment['compound']
#elif sentiment_analyzer ==''
for country in countries:
if not country in foreign_policy.keys():
foreign_policy.update({country: [score, 1]})
else:
foreign_policy[country][0] += score
foreign_policy[country][1] += 1
for country, value in foreign_policy.items():
foreign_policy.update({country: [(value[0]/value[1]), value[1]]})
print("--- %d seconds elapsed ---" % (time.time() - start_time))
return foreign_policy
# In[40]:
result = foreign_policy(text_corpus, sentiment_analyzer='vader')
# In[42]:
result
# In[54]:
pycountry.countries.get(name='Honduras')
# In[80]:
def create_palette(sentiments):
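    # Map each sentiment score to an RGB color on a red (most negative) to
    # green (most positive) gradient, rescaling the scores to [0, 1] first.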
color_palette = []
minimum = np.min(sentiments)
maximum = np.max(sentiments)
for sentiment in sentiments:
rescaled = (sentiment-minimum) / (maximum - minimum)
g = rescaled
r = 1 - g
color_palette.append((r,g,0))
return color_palette
# ### Plotting the foreign policy
# In[87]:
df = pd.DataFrame.from_dict(result, orient='index')
df.reset_index(inplace=True)
df.columns =['Country', 'Sentiment', 'Count']
df = df[df['Count'] > 15]
df = df.sort_values('Sentiment', ascending=False)
gradient = create_palette(df['Sentiment'].values)
plt.figure(figsize=(15,7))
plot = sns.barplot(x='Country', y='Count', data=df, orient='vertical', palette=gradient)
plt.xticks(rotation=45);
plt.ylabel('Number of mentions');  # bar color encodes sentiment (red = negative, green = positive)
# In[45]:
pycountry.countries.get(name='Palau')
# In[26]:
test_sentence = "and here I am AM TO speaking of France"
test_sentence = "This is a typical sentence, with don't. Punkts, something e.g. words US, U.S.A"
cached_stopwords = set(stopwords.words('english'))
words_to_be_removed = ["RE", "FM", "TV", "LA", "AL", "BEN", "AQ", "AND", "AM", "AT"]
cached_stopwords.update(words_to_be_removed)
tokens = nltk.word_tokenize(test_sentence)
#tokens = [word for word in tokens if word not in cached_stopwords]
countries = find_countries(tokens)
print(tokens)
# In[ ]:
test_sentence = 'This is a very pleasant day.'
#test_sentence = 'this is a completely neutral sentence'
polarity = {'Positive': 1, 'Neutral': 0, 'Negative': -1}
vader_analyzer = SentimentIntensityAnalyzer()
tokens = nltk.word_tokenize(test_sentence)
#tokens.remove('is')
result = ' '.join(tokens)
sentiment = vader_analyzer.polarity_scores(result)
#mean = -sentiment['neg'] + sentiment['pos']
#print(sentiment, mean)
np.max(sentiment.values())
# In[ ]:
test_set = ['nice nice good USA US switzerland', 'bad good bad bad bad libya', 'Switzerland good nice nice']
words_to_be_removed = ["RE", "FM", "TV", "LA", "AL", "BEN", "AQ"]
vader_analyzer = SentimentIntensityAnalyzer()
country_counts = {}
country_sentiments = {}
foreign_policy = {}
for email in test_set:
tokens = nltk.word_tokenize(email, language='english')
tokens = [word for word in tokens if word not in words_to_be_removed]
clean_email = ' '.join(tokens)
sentiment = vader_analyzer.polarity_scores(clean_email)
score = sentiment['compound']
# country lookup in raw text
countries = find_countries(tokens)
for country in countries:
if not country in foreign_policy.keys():
foreign_policy.update({country: [score, 1]})
else:
foreign_policy[country][0] += score
foreign_policy[country][1] += 1
for country, value in foreign_policy.items():
foreign_policy.update({country: [(value[0]/value[1]), value[1]]})
| gpl-3.0 |
kth-ros-pkg/hfts_grasp_planner | src/hfts_grasp_planner/utils.py | 1 | 19627 | #! /usr/bin/python
import numpy as np
from sensor_msgs.msg import PointCloud
from geometry_msgs.msg import Point32
import std_msgs.msg
import rospy
from sklearn.cluster import KMeans as KMeans
import math, copy, os, itertools
import matplotlib.pyplot as plt
from sklearn.neighbors import KDTree
from stl import mesh as stl_mesh_module
from abc import ABCMeta, abstractmethod
import openravepy as orpy
import hfts_grasp_planner.external.transformations as transformations
import hfts_grasp_planner.hfts_generation as hfts_generation
from scipy.spatial import ConvexHull
DEFAULT_HFTS_GENERATION_PARAMS = {'max_normal_variance': 0.2,
'min_contact_patch_radius': 0.015,
'contact_density': 300,
'max_num_points': 10000,
'position_weight': 2,
'branching_factor': 4,
'first_level_branching_factor': 3}
class ObjectIO(object):
__metaclass__ = ABCMeta
@abstractmethod
def get_hfts(self, obj_id, force_new=False):
pass
@abstractmethod
def get_openrave_file_name(self, obj_id):
pass
class ObjectFileIO(ObjectIO):
def __init__(self, data_path, var_filter=True,
hfts_generation_parameters=DEFAULT_HFTS_GENERATION_PARAMS,
max_num_points=10000):
self._data_path = data_path
self._b_var_filter = var_filter
self._hfts_generation_params = hfts_generation_parameters
self._max_num_points = max_num_points
self._last_obj_id = None
self._last_hfts = None
self._last_hfts_param = None
self._last_obj_com = None
def get_points(self, obj_id, b_filter=None):
if b_filter is None:
b_filter = self._b_var_filter
obj_file = self._data_path + '/' + obj_id + '/objectModel'
file_extension = self.get_obj_file_extension(obj_id)
points = None
contact_density = extract_hfts_gen_parameter(self._hfts_generation_params, 'contact_density')
if file_extension == '.ply':
points = hfts_generation.create_contact_points_from_ply(file_name=obj_file + file_extension,
density=contact_density)
elif file_extension == '.stl':
points = hfts_generation.create_contact_points_from_stl(file_name=obj_file + file_extension,
density=contact_density)
        # TODO read point cloud if there are no files stored.
# rospy.logwarn('No previous file found in the database, will proceed with raw point cloud instead.')
if points is not None:
com = np.mean(points[:, :3], axis=0)
if b_filter:
patch_size = extract_hfts_gen_parameter(self._hfts_generation_params,
'min_contact_patch_radius')
max_variance = extract_hfts_gen_parameter(self._hfts_generation_params,
'max_normal_variance')
points = hfts_generation.filter_unsmooth_points(points,
radius=patch_size,
max_variance=max_variance)
max_num_points = extract_hfts_gen_parameter(self._hfts_generation_params, 'max_num_points')
points = hfts_generation.down_sample_points(points, max_num_points)
else:
rospy.logerr('[ObjectFileIO] Failed to load mesh from ' + str(file_extension) +
' file for object ' + obj_id)
com = None
return points, com
def get_obj_file_extension(self, obj_id):
obj_file = self._data_path + '/' + obj_id + '/objectModel'
b_is_valid_file = os.path.exists(obj_file + '.ply') and os.path.isfile(obj_file + '.ply')
if b_is_valid_file:
return '.ply'
b_is_valid_file = os.path.exists(obj_file + '.stl') and os.path.isfile(obj_file + '.stl')
if b_is_valid_file:
return '.stl'
rospy.logerr('[ObjectFileIO::get_obj_file_extension] No compatible file found with prefix name ' + obj_file)
return None
def get_openrave_file_name(self, obj_id):
file_extension = self.get_obj_file_extension(obj_id)
if file_extension is not None:
return self._data_path + '/' + obj_id + '/' + 'objectModel' + file_extension
xml_file_name = self._data_path + '/' + obj_id + '/' + obj_id + '.kinbody.xml'
b_xml_file_exists = os.path.exists(xml_file_name)
if b_xml_file_exists:
return xml_file_name
return None
def get_hfts(self, obj_id, force_new=False):
# Check whether we have an HFTS for this object in memory
if self._last_obj_id != obj_id or force_new:
# If not, update
b_success = self._update_hfts(obj_id, force_new)
if not b_success:
return None, None, None
return self._last_hfts, self._last_hfts_param.astype(int), self._last_obj_com
def _read_hfts(self, obj_id, hfts_file, hfts_param_file, obj_com_file):
if os.path.exists(hfts_file) and os.path.isfile(hfts_file) \
and os.path.exists(hfts_param_file) and os.path.isfile(hfts_param_file) \
and os.path.exists(obj_com_file) and os.path.isfile(obj_com_file):
self._last_obj_id = obj_id
self._last_hfts = np.load(hfts_file)
self._last_hfts_param = np.load(hfts_param_file)
self._last_obj_com = np.load(obj_com_file)
return True
return False
def set_hfts_generation_parameters(self, params):
if type(params) is not dict:
            raise TypeError('[ObjectFileIO::set_hfts_generation_parameters] Expected a dictionary, received ' + str(type(params)))
self._hfts_generation_params = params
def show_hfts(self, level, or_drawer, object_transform=None, b_normals=False):
"""
Renders the most recently loaded hfts in OpenRAVE.
:param level: the level of the hfts to show
:param or_drawer: an instance of an OpenRAVEDrawer used for rendering
:param object_transform: An optional transform of the object frame.
:param b_normals: If true, also renders normals of each point
"""
if self._last_hfts is None:
            rospy.logerr('[ObjectFileIO::show_hfts] No HFTS model loaded.')
return
if level > len(self._last_hfts_param) - 1:
            raise ValueError('[ObjectFileIO::show_hfts] level ' + str(level) + ' does not exist')
hfts_generation.or_render_hfts(or_drawer, self._last_hfts, self._last_hfts_param,
level, transform=object_transform, b_normals=b_normals)
# b_factors = []
# for i in range(level + 1):
# b_factors.append(np.arange(self._last_hfts_param[i]))
# labels = itertools.product(*b_factors)
# hfts_labels = self._last_hfts[:, 6:7 + level]
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# for label in labels:
# idx = np.where((hfts_labels == label).all(axis=1))[0]
# cluster_points = self._last_hfts[idx, :3]
# ax.scatter(cluster_points[:, 0], cluster_points[:, 1], cluster_points[:, 2], c=np.random.rand(3,1), s = 100)
# plt.show()
def _update_hfts(self, obj_id, force_new=False):
""" Updates the cached hfts """
hfts_file = self._data_path + '/' + obj_id + '/hfts.npy'
hfts_param_file = self._data_path + '/' + obj_id + '/hftsParam.npy'
obj_com_file = self._data_path + '/' + obj_id + '/objCOM.npy'
# If it does not need to be regenerated, try to load it from file
if not force_new:
b_hfts_read = self._read_hfts(obj_id, hfts_file, hfts_param_file, obj_com_file)
if b_hfts_read:
return True
rospy.logwarn('HFTS is not available in the database')
# If we reached this point, we have to generate a new HFTS from mesh/point cloud
points, com = self.get_points(obj_id)
if points is None:
rospy.logerr('Could not generate HFTS for object ' + obj_id)
return False
# If we have points, generate an hfts
hfts_gen = hfts_generation.HFTSGenerator(points, com)
hfts_gen.set_branch_factor(extract_hfts_gen_parameter(self._hfts_generation_params, 'branching_factor'))
hfts_gen.set_position_weight(extract_hfts_gen_parameter(self._hfts_generation_params, 'position_weight'))
hfts_gen.run()
self._last_obj_id = obj_id
self._last_hfts = hfts_gen.get_hfts()
self._last_hfts_param = hfts_gen.get_hfts_param()
self._last_obj_com = com
hfts_gen.save_hfts(hfts_file=hfts_file, hfts_param_file=hfts_param_file,
com_file=obj_com_file)
return True
def extract_hfts_gen_parameter(param_dict, name):
if name in param_dict:
return param_dict[name]
elif name in DEFAULT_HFTS_GENERATION_PARAMS:
return DEFAULT_HFTS_GENERATION_PARAMS[name]
else:
raise ValueError('[utils::extract_hfts_gen_parameter] Unknown HFTS generation parameter ' + str(name))
def clamp(values, min_values, max_values):
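    # clamp each entry of values element-wise into [min_values[i], max_values[i]]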
clamped_values = len(values) * [0.0]
assert len(values) == len(min_values) and len(values) == len(max_values)
for i in range(len(values)):
clamped_values[i] = max(min(values[i], max_values[i]), min_values[i])
return clamped_values
def read_stl_file(file_id):
stl_mesh = stl_mesh_module.Mesh.from_file(file_id, calculate_normals=False)
points = np.zeros((len(stl_mesh.points), 6))
# Extract points with normals from the mesh surface
for face_idx in range(len(stl_mesh.points)):
# For this, we select the center of each face
points[face_idx, 0:3] = (stl_mesh.v0[face_idx] + stl_mesh.v1[face_idx] + stl_mesh.v2[face_idx]) / 3.0
normal_length = np.linalg.norm(stl_mesh.normals[face_idx])
if normal_length == 0.0:
stl_mesh.update_normals()
normal_length = np.linalg.norm(stl_mesh.normals[face_idx])
if normal_length == 0.0:
raise IOError('[utils.py::read_stl_file] Could not extract valid normals from the given file ' \
+ str(file_id))
points[face_idx, 3:6] = stl_mesh.normals[face_idx] / normal_length
return points
def create_point_cloud(points):
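    # Build a sensor_msgs/PointCloud message in the 'map' frame from an
    # iterable of 3D points (any extra per-point fields, e.g. normals, are
    # ignored).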
point_cloud = PointCloud()
header = std_msgs.msg.Header()
header.stamp = rospy.Time.now()
header.frame_id = 'map'
point_cloud.header = header
for point in points:
point_cloud.points.append(Point32(point[0], point[1], point[2]))
return point_cloud
def vec_angel_diff(v0, v1):
# in radians
assert len(v0) == len(v1)
l0 = math.sqrt(np.inner(v0, v0))
l1 = math.sqrt(np.inner(v1, v1))
if l0 == 0 or l1 == 0:
return 0
x = np.dot(v0, v1) / (l0*l1)
x = min(1.0, max(-1.0, x)) # fixing math precision error
    angle = math.acos(x)
    return angle
def dist_in_range(d, r):
if d < r[0]:
return r[0] - d
elif d > r[1]:
return d - r[1]
else:
return 0.0
def normal_distance(normals_a, normals_b):
d = 0.0
for i in range(len(normals_a)):
d += vec_angel_diff(normals_a[i], normals_b[i])
return d
def position_distance(pos_values_a, pos_values_b):
d = 0.0
for i in range(len(pos_values_a)):
d += np.linalg.norm(pos_values_a[i] - pos_values_b[i])
return d
def generate_wrench_cone(contact, normal, mu, center, face_n):
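    """ Approximates the friction cone at a contact by face_n edge forces and
        returns the corresponding 6D wrenches (force, torque) about center.
        :param contact - contact position
        :param normal - contact normal at that position
        :param mu - friction coefficient
        :param center - reference point for the torque computation (e.g. object COM)
        :param face_n - number of cone faces/edges used in the discretization
    """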
ref_vec = np.array([0, 0, 1])
center = np.array(center)
contact = np.array(contact)
normal = np.array(normal)
forces = []
angle_step = float(2 * math.pi) / face_n
# create face_n cone edges
for i in range(face_n):
angle = angle_step * i
x = mu * math.cos(angle)
y = mu * math.sin(angle)
z = 1
forces.append([x, y, z])
forces = np.asarray(forces)
rot_angle = transformations.angle_between_vectors(ref_vec, normal)
axis = np.cross(ref_vec, normal)
# take care of axis aligned normals
if np.linalg.norm(axis) > 0.01:
r_mat = transformations.rotation_matrix(rot_angle, axis)[:3, :3]
else:
if np.dot(ref_vec, normal) > 0:
r_mat = np.identity(3, float)
else:
r_mat = np.identity(3, float)
r_mat[1,1] = -1.
r_mat[2,2] = -1.
forces = np.dot(r_mat, np.transpose(forces))
forces = np.transpose(forces)
# compute wrenches
wrenches = []
for i in range(face_n):
torque = np.cross((contact - center), forces[i])
wrenches.append(np.append(forces[i], torque))
wrenches = np.asarray(wrenches)
return wrenches
def compute_grasp_stability(grasp_contacts, mu, com=None, face_n=8):
    """ Computes the Ferrari-Canny grasp quality metric, i.e. the radius of
        the largest origin-centered ball in the grasp wrench space, for the
        given n contacts.
:param grasp_contacts - An nx6 matrix where each row is a contact position and normal
:param mu - friction coefficient
:param com - center of mass of the grasped object (assumed to be [0,0,0], if None)
:param face_n - number of wrench cone faces
"""
if com is None:
com = [0, 0, 0]
wrenches = []
grasp = np.asarray(grasp_contacts)
# iterate over each contact
for i in range(len(grasp)):
wrench_cone = generate_wrench_cone(grasp[i, :3], grasp[i, 3:], mu, com, face_n)
for wrench in wrench_cone:
wrenches.append(list(wrench))
wrenches = np.asarray(wrenches)
hull = ConvexHull(wrenches, incremental=False, qhull_options='Pp QJ')
offsets = -hull.equations[:, -1]
return min(offsets)
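# A minimal usage sketch for compute_grasp_stability; the contact positions,
# normals, friction value and the helper name below are hypothetical
# illustration data, not taken from the planner itself.
def _example_grasp_stability():
    # three contacts forming an equilateral triangle on a small disc,
    # with inward-pointing unit normals (rows are [x, y, z, nx, ny, nz])
    contacts = np.array([[0.05, 0.0, 0.0, -1.0, 0.0, 0.0],
                         [-0.025, 0.0433, 0.0, 0.5, -0.866, 0.0],
                         [-0.025, -0.0433, 0.0, 0.5, 0.866, 0.0]])
    # a strictly positive score indicates a force-closure grasp
    return compute_grasp_stability(contacts, mu=0.5, face_n=8)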
class OpenRAVEDrawer:
def __init__(self, or_env, robot, debug):
"""
Create a new OpenRAVEDrawer.
Parameters:
or_env - OpenRAVE environment
robot - OpenRAVE robot
debug - Boolean flag whether to enable tree drawing
"""
self.or_env = or_env
self.robot = robot
self.debug = debug
self.handles = []
self._node_ids = {}
def clear(self):
self.handles = []
self._node_ids = {}
def get_eef_pose(self, config):
orig_config = self.robot.GetDOFValues()
self.robot.SetDOFValues(config)
manip = self.robot.GetActiveManipulator()
eef_pose = manip.GetEndEffectorTransform()
self.robot.SetDOFValues(orig_config)
return eef_pose
def draw_tree(self, tree, color):
if not tree.get_id() in self._node_ids:
self._node_ids[tree.get_id()] = {}
node_ids = self._node_ids[tree.get_id()]
with self.or_env:
for n in tree._nodes:
if n.get_id() in node_ids:
continue
else:
node_ids[n.get_id()] = True
eef_pose = self.get_eef_pose(n.get_sample_data().get_configuration())
if n.get_parent_id() == n.get_id():
root_aabb = orpy.AABB(eef_pose[0:3, 3], [0.01, 0.01, 0.01])
self.handles.append(self.draw_bounding_box(root_aabb, color, 2.0))
continue
parent_node = tree._nodes[n.get_parent_id()]
eef_pose_parent = self.get_eef_pose(parent_node.get_sample_data().get_configuration())
points = [x for x in eef_pose[0:3, 3]]
points.extend([x for x in eef_pose_parent[0:3, 3]])
# print numpy.linalg.norm(eef_pose[0:3,3] - eef_pose_parent[0:3, 3])
handle = self.or_env.drawlinelist(points, 2, colors=color)
self.handles.append(handle)
def draw_trees(self, forward_tree, backward_trees=[]):
if not self.debug:
return
# logging.debug('Forward tree size is: ' + str(forwardTree.size()))
self.draw_tree(forward_tree, color=[1, 0, 0])
for bTree in backward_trees:
# logging.debug('Backward tree of size: ' + str(bTree.size()))
self.draw_tree(bTree, color=[0, 0, 1])
def draw_arrow(self, point, dir, length=0.04, width=0.01, color=None):
if color is None:
color = [1, 0, 0, 1]
self.handles.append(self.or_env.drawarrow(point, point + length * dir, width, color))
def draw_pose(self, transform_matrix):
for i in range(3):
color = [0, 0, 0, 1]
color[i] = 1
self.draw_arrow(transform_matrix[:3, 3], transform_matrix[:3, i], color=color)
def draw_bounding_box(self, abb=None, color=[0.3, 0.3, 0.3], width=1.0, position=None, extents=None):
'''
Draws a bounding box.
        :param abb: OpenRAVE style axis-aligned bounding box. If None, arguments position and extents must not be None.
:param array color: Array representing color as rgb
:param float width: Width of the lines to draw
:param position: Specifies the center position of the bounding box. Must not be None if abb is None.
:param extents: Specifies the extents of the bounding box (1/2 * [width, height, depth]).
Must not be None if abb is None.
:return: Reference to handles list.
'''
if abb is not None:
position = abb.pos()
extents = abb.extents()
if position is None or extents is None:
raise ValueError('Either abb must not be None or position and extents must not be None')
points = [[position[0] - extents[0], position[1] - extents[1], position[2] - extents[2]],
[position[0] - extents[0], position[1] + extents[1], position[2] - extents[2]],
[position[0] - extents[0], position[1] + extents[1], position[2] + extents[2]],
[position[0] - extents[0], position[1] - extents[1], position[2] + extents[2]],
[position[0] + extents[0], position[1] - extents[1], position[2] - extents[2]],
[position[0] + extents[0], position[1] + extents[1], position[2] - extents[2]],
[position[0] + extents[0], position[1] + extents[1], position[2] + extents[2]],
[position[0] + extents[0], position[1] - extents[1], position[2] + extents[2]]]
# Back face
edges = []
edges.extend(points[0])
edges.extend(points[1])
edges.extend(points[1])
edges.extend(points[2])
edges.extend(points[2])
edges.extend(points[3])
edges.extend(points[3])
edges.extend(points[0])
# Front face
edges.extend(points[4])
edges.extend(points[5])
edges.extend(points[5])
edges.extend(points[6])
edges.extend(points[6])
edges.extend(points[7])
edges.extend(points[7])
edges.extend(points[4])
# Sides
edges.extend(points[0])
edges.extend(points[4])
edges.extend(points[3])
edges.extend(points[7])
edges.extend(points[2])
edges.extend(points[6])
edges.extend(points[1])
edges.extend(points[5])
self.handles.append(self.or_env.drawlinelist(edges, width, color))
| bsd-3-clause |
phdowling/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First, clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/matplotlib/contour.py | 10 | 68919 | """
These are classes to support contour plotting and
labelling for the axes class
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import warnings
import matplotlib as mpl
import numpy as np
from numpy import ma
import matplotlib._cntr as _cntr
import matplotlib._contour as _contour
import matplotlib.path as mpath
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.collections as mcoll
import matplotlib.font_manager as font_manager
import matplotlib.text as text
import matplotlib.cbook as cbook
import matplotlib.mlab as mlab
import matplotlib.mathtext as mathtext
import matplotlib.patches as mpatches
import matplotlib.texmanager as texmanager
import matplotlib.transforms as mtrans
# Import needed for adding manual selection capability to clabel
from matplotlib.blocking_input import BlockingContourLabeler
# We can't use a single line collection for contour because a line
# collection can have only a single line style, and we want to be able to have
# dashed negative contours, for example, and solid positive contours.
# We could use a single polygon collection for filled contours, but it
# seems better to keep line and filled contours similar, with one collection
# per level.
class ClabelText(text.Text):
"""
    Unlike ordinary text, get_rotation returns an updated
    angle in pixel coordinates, assuming that the input rotation is
    an angle in data coordinates (or whatever transform is set).
"""
def get_rotation(self):
angle = text.Text.get_rotation(self)
trans = self.get_transform()
x, y = self.get_position()
new_angles = trans.transform_angles(np.array([angle]),
np.array([[x, y]]))
return new_angles[0]
class ContourLabeler(object):
"""Mixin to provide labelling capability to ContourSet"""
def clabel(self, *args, **kwargs):
"""
Label a contour plot.
Call signature::
clabel(cs, **kwargs)
Adds labels to line contours in *cs*, where *cs* is a
:class:`~matplotlib.contour.ContourSet` object returned by
contour.
::
clabel(cs, v, **kwargs)
only labels contours listed in *v*.
Optional keyword arguments:
*fontsize*:
size in points or relative size e.g., 'smaller', 'x-large'
*colors*:
- if *None*, the color of each label matches the color of
the corresponding contour
- if one string color, e.g., *colors* = 'r' or *colors* =
'red', all labels will be plotted in this color
- if a tuple of matplotlib color args (string, float, rgb, etc),
different labels will be plotted in different colors in the order
specified
*inline*:
controls whether the underlying contour is removed or
not. Default is *True*.
*inline_spacing*:
space in pixels to leave on each side of label when
placing inline. Defaults to 5. This spacing will be
exact for labels at locations where the contour is
straight, less so for labels on curved contours.
*fmt*:
a format string for the label. Default is '%1.3f'
Alternatively, this can be a dictionary matching contour
levels with arbitrary strings to use for each contour level
(i.e., fmt[level]=string), or it can be any callable, such
as a :class:`~matplotlib.ticker.Formatter` instance, that
returns a string when called with a numeric contour level.
*manual*:
if *True*, contour labels will be placed manually using
mouse clicks. Click the first button near a contour to
add a label, click the second button (or potentially both
mouse buttons at once) to finish adding labels. The third
button can be used to remove the last label added, but
only if labels are not inline. Alternatively, the keyboard
can be used to select label locations (enter to end label
placement, delete or backspace act like the third mouse button,
and any other key will select a label location).
*manual* can be an iterable object of x,y tuples. Contour labels
will be created as if mouse is clicked at each x,y positions.
*rightside_up*:
if *True* (default), label rotations will always be plus
or minus 90 degrees from level.
*use_clabeltext*:
if *True* (default is False), ClabelText class (instead of
matplotlib.Text) is used to create labels. ClabelText
recalculates rotation angles of texts during the drawing time,
therefore this can be used if aspect of the axes changes.
.. plot:: mpl_examples/pylab_examples/contour_demo.py
"""
"""
NOTES on how this all works:
clabel basically takes the input arguments and uses them to
add a list of "label specific" attributes to the ContourSet
object. These attributes are all of the form label* and names
should be fairly self explanatory.
Once these attributes are set, clabel passes control to the
labels method (case of automatic label placement) or
BlockingContourLabeler (case of manual label placement).
"""
fontsize = kwargs.get('fontsize', None)
inline = kwargs.get('inline', 1)
inline_spacing = kwargs.get('inline_spacing', 5)
self.labelFmt = kwargs.get('fmt', '%1.3f')
_colors = kwargs.get('colors', None)
self._use_clabeltext = kwargs.get('use_clabeltext', False)
# Detect if manual selection is desired and remove from argument list
self.labelManual = kwargs.get('manual', False)
self.rightside_up = kwargs.get('rightside_up', True)
if len(args) == 0:
levels = self.levels
indices = list(xrange(len(self.cvalues)))
elif len(args) == 1:
levlabs = list(args[0])
indices, levels = [], []
for i, lev in enumerate(self.levels):
if lev in levlabs:
indices.append(i)
levels.append(lev)
if len(levels) < len(levlabs):
msg = "Specified levels " + str(levlabs)
msg += "\n don't match available levels "
msg += str(self.levels)
raise ValueError(msg)
else:
raise TypeError("Illegal arguments to clabel, see help(clabel)")
self.labelLevelList = levels
self.labelIndiceList = indices
self.labelFontProps = font_manager.FontProperties()
self.labelFontProps.set_size(fontsize)
font_size_pts = self.labelFontProps.get_size_in_points()
self.labelFontSizeList = [font_size_pts] * len(levels)
if _colors is None:
self.labelMappable = self
self.labelCValueList = np.take(self.cvalues, self.labelIndiceList)
else:
cmap = colors.ListedColormap(_colors, N=len(self.labelLevelList))
self.labelCValueList = list(xrange(len(self.labelLevelList)))
self.labelMappable = cm.ScalarMappable(cmap=cmap,
norm=colors.NoNorm())
self.labelXYs = []
if cbook.iterable(self.labelManual):
for x, y in self.labelManual:
self.add_label_near(x, y, inline,
inline_spacing)
elif self.labelManual:
print('Select label locations manually using first mouse button.')
print('End manual selection with second mouse button.')
if not inline:
print('Remove last label by clicking third mouse button.')
blocking_contour_labeler = BlockingContourLabeler(self)
blocking_contour_labeler(inline, inline_spacing)
else:
self.labels(inline, inline_spacing)
# Hold on to some old attribute names. These are deprecated and will
# be removed in the near future (sometime after 2008-08-01), but
# keeping for now for backwards compatibility
self.cl = self.labelTexts
self.cl_xy = self.labelXYs
self.cl_cvalues = self.labelCValues
self.labelTextsList = cbook.silent_list('text.Text', self.labelTexts)
return self.labelTextsList
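    # A minimal usage sketch for the labelling machinery above, assuming a
    # standard pyplot session (the data is hypothetical):
    #
    #   import numpy as np
    #   import matplotlib.pyplot as plt
    #   X, Y = np.meshgrid(np.linspace(-3, 3, 100), np.linspace(-3, 3, 100))
    #   cs = plt.contour(X, Y, np.exp(-X**2 - Y**2))
    #   plt.clabel(cs, inline=True, fontsize=10, fmt='%1.2f')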
def print_label(self, linecontour, labelwidth):
"Return *False* if contours are too short for a label."
lcsize = len(linecontour)
if lcsize > 10 * labelwidth:
return True
xmax = np.amax(linecontour[:, 0])
xmin = np.amin(linecontour[:, 0])
ymax = np.amax(linecontour[:, 1])
ymin = np.amin(linecontour[:, 1])
lw = labelwidth
if (xmax - xmin) > 1.2 * lw or (ymax - ymin) > 1.2 * lw:
return True
else:
return False
def too_close(self, x, y, lw):
"Return *True* if a label is already near this location."
for loc in self.labelXYs:
d = np.sqrt((x - loc[0]) ** 2 + (y - loc[1]) ** 2)
if d < 1.2 * lw:
return True
return False
def get_label_coords(self, distances, XX, YY, ysize, lw):
"""
Return x, y, and the index of a label location.
Labels are plotted at a location with the smallest
deviation of the contour from a straight line
unless there is another label nearby, in which case
the next best place on the contour is picked up.
If all such candidates are rejected, the beginning
of the contour is chosen.
"""
hysize = int(ysize / 2)
adist = np.argsort(distances)
for ind in adist:
x, y = XX[ind][hysize], YY[ind][hysize]
if self.too_close(x, y, lw):
continue
return x, y, ind
ind = adist[0]
x, y = XX[ind][hysize], YY[ind][hysize]
return x, y, ind
def get_label_width(self, lev, fmt, fsize):
"""
Return the width of the label in points.
"""
if not cbook.is_string_like(lev):
lev = self.get_text(lev, fmt)
lev, ismath = text.Text.is_math_text(lev)
if ismath == 'TeX':
if not hasattr(self, '_TeX_manager'):
self._TeX_manager = texmanager.TexManager()
lw, _, _ = self._TeX_manager.get_text_width_height_descent(lev,
fsize)
elif ismath:
if not hasattr(self, '_mathtext_parser'):
self._mathtext_parser = mathtext.MathTextParser('bitmap')
img, _ = self._mathtext_parser.parse(lev, dpi=72,
prop=self.labelFontProps)
lw = img.get_width() # at dpi=72, the units are PostScript points
else:
# width is much less than "font size"
lw = (len(lev)) * fsize * 0.6
return lw
def get_real_label_width(self, lev, fmt, fsize):
"""
This computes actual onscreen label width.
This uses some black magic to determine onscreen extent of non-drawn
label. This magic may not be very robust.
This method is not being used, and may be modified or removed.
"""
# Find middle of axes
xx = np.mean(np.asarray(self.ax.axis()).reshape(2, 2), axis=1)
# Temporarily create text object
t = text.Text(xx[0], xx[1])
self.set_label_props(t, self.get_text(lev, fmt), 'k')
# Some black magic to get onscreen extent
# NOTE: This will only work for already drawn figures, as the canvas
# does not have a renderer otherwise. This is the reason this function
# can't be integrated into the rest of the code.
bbox = t.get_window_extent(renderer=self.ax.figure.canvas.renderer)
# difference in pixel extent of image
lw = np.diff(bbox.corners()[0::2, 0])[0]
return lw
def set_label_props(self, label, text, color):
"set the label properties - color, fontsize, text"
label.set_text(text)
label.set_color(color)
label.set_fontproperties(self.labelFontProps)
label.set_clip_box(self.ax.bbox)
def get_text(self, lev, fmt):
"get the text of the label"
if cbook.is_string_like(lev):
return lev
else:
if isinstance(fmt, dict):
return fmt[lev]
elif six.callable(fmt):
return fmt(lev)
else:
return fmt % lev
def locate_label(self, linecontour, labelwidth):
"""
Find a good place to plot a label (relatively flat
part of the contour).
"""
nsize = len(linecontour)
if labelwidth > 1:
xsize = int(np.ceil(nsize / labelwidth))
else:
xsize = 1
if xsize == 1:
ysize = nsize
else:
ysize = int(labelwidth)
XX = np.resize(linecontour[:, 0], (xsize, ysize))
YY = np.resize(linecontour[:, 1], (xsize, ysize))
# I might have fouled up the following:
yfirst = YY[:, 0].reshape(xsize, 1)
ylast = YY[:, -1].reshape(xsize, 1)
xfirst = XX[:, 0].reshape(xsize, 1)
xlast = XX[:, -1].reshape(xsize, 1)
s = (yfirst - YY) * (xlast - xfirst) - (xfirst - XX) * (ylast - yfirst)
L = np.sqrt((xlast - xfirst) ** 2 + (ylast - yfirst) ** 2).ravel()
dist = np.add.reduce(([(abs(s)[i] / L[i]) for i in range(xsize)]), -1)
x, y, ind = self.get_label_coords(dist, XX, YY, ysize, labelwidth)
# There must be a more efficient way...
lc = [tuple(l) for l in linecontour]
dind = lc.index((x, y))
return x, y, dind
def calc_label_rot_and_inline(self, slc, ind, lw, lc=None, spacing=5):
"""
This function calculates the appropriate label rotation given
the linecontour coordinates in screen units, the index of the
label location and the label width.
It will also break contour and calculate inlining if *lc* is
not empty (lc defaults to the empty list if None). *spacing*
is the space around the label in pixels to leave empty.
Do both of these tasks at once to avoid calling mlab.path_length
multiple times, which is relatively costly.
The method used here involves calculating the path length
along the contour in pixel coordinates and then looking
approximately label width / 2 away from central point to
determine rotation and then to break contour if desired.
"""
if lc is None:
lc = []
# Half the label width
hlw = lw / 2.0
# Check if closed and, if so, rotate contour so label is at edge
closed = mlab.is_closed_polygon(slc)
if closed:
slc = np.r_[slc[ind:-1], slc[:ind + 1]]
if len(lc): # Rotate lc also if not empty
lc = np.r_[lc[ind:-1], lc[:ind + 1]]
ind = 0
# Path length in pixel space
pl = mlab.path_length(slc)
pl = pl - pl[ind]
# Use linear interpolation to get points around label
xi = np.array([-hlw, hlw])
if closed: # Look at end also for closed contours
dp = np.array([pl[-1], 0])
else:
dp = np.zeros_like(xi)
ll = mlab.less_simple_linear_interpolation(pl, slc, dp + xi,
extrap=True)
# get vector in pixel space coordinates from one point to other
dd = np.diff(ll, axis=0).ravel()
# Get angle of vector - must be calculated in pixel space for
# text rotation to work correctly
if np.all(dd == 0): # Must deal with case of zero length label
rotation = 0.0
else:
rotation = np.arctan2(dd[1], dd[0]) * 180.0 / np.pi
if self.rightside_up:
# Fix angle so text is never upside-down
if rotation > 90:
rotation = rotation - 180.0
if rotation < -90:
rotation = 180.0 + rotation
# Break contour if desired
nlc = []
if len(lc):
# Expand range by spacing
xi = dp + xi + np.array([-spacing, spacing])
# Get indices near points of interest
I = mlab.less_simple_linear_interpolation(
pl, np.arange(len(pl)), xi, extrap=False)
# If those indices aren't beyond contour edge, find x,y
if (not np.isnan(I[0])) and int(I[0]) != I[0]:
xy1 = mlab.less_simple_linear_interpolation(
pl, lc, [xi[0]])
if (not np.isnan(I[1])) and int(I[1]) != I[1]:
xy2 = mlab.less_simple_linear_interpolation(
pl, lc, [xi[1]])
# Round to integer values but keep as float
# To allow check against nan below
I = [np.floor(I[0]), np.ceil(I[1])]
# Actually break contours
if closed:
# This will remove contour if shorter than label
if np.all(~np.isnan(I)):
nlc.append(np.r_[xy2, lc[int(I[1]):int(I[0]) + 1], xy1])
else:
# These will remove pieces of contour if they have length zero
if not np.isnan(I[0]):
nlc.append(np.r_[lc[:int(I[0]) + 1], xy1])
if not np.isnan(I[1]):
nlc.append(np.r_[xy2, lc[int(I[1]):]])
# The current implementation removes contours completely
# covered by labels. Uncomment line below to keep
# original contour if this is the preferred behavior.
# if not len(nlc): nlc = [ lc ]
return rotation, nlc
def _get_label_text(self, x, y, rotation):
dx, dy = self.ax.transData.inverted().transform_point((x, y))
t = text.Text(dx, dy, rotation=rotation,
horizontalalignment='center',
verticalalignment='center')
return t
def _get_label_clabeltext(self, x, y, rotation):
# x, y, rotation is given in pixel coordinate. Convert them to
# the data coordinate and create a label using ClabelText
# class. This way, the roation of the clabel is along the
# contour line always.
transDataInv = self.ax.transData.inverted()
dx, dy = transDataInv.transform_point((x, y))
drotation = transDataInv.transform_angles(np.array([rotation]),
np.array([[x, y]]))
t = ClabelText(dx, dy, rotation=drotation[0],
horizontalalignment='center',
verticalalignment='center')
return t
def _add_label(self, t, x, y, lev, cvalue):
color = self.labelMappable.to_rgba(cvalue, alpha=self.alpha)
_text = self.get_text(lev, self.labelFmt)
self.set_label_props(t, _text, color)
self.labelTexts.append(t)
self.labelCValues.append(cvalue)
self.labelXYs.append((x, y))
# Add label to plot here - useful for manual mode label selection
self.ax.add_artist(t)
def add_label(self, x, y, rotation, lev, cvalue):
"""
Add contour label using :class:`~matplotlib.text.Text` class.
"""
t = self._get_label_text(x, y, rotation)
self._add_label(t, x, y, lev, cvalue)
def add_label_clabeltext(self, x, y, rotation, lev, cvalue):
"""
Add contour label using :class:`ClabelText` class.
"""
# x, y, rotation is given in pixel coordinate. Convert them to
# the data coordinate and create a label using ClabelText
        # class. This way, the rotation of the clabel is along the
# contour line always.
t = self._get_label_clabeltext(x, y, rotation)
self._add_label(t, x, y, lev, cvalue)
def add_label_near(self, x, y, inline=True, inline_spacing=5,
transform=None):
"""
Add a label near the point (x, y). If transform is None
(default), (x, y) is in data coordinates; if transform is
False, (x, y) is in display coordinates; otherwise, the
specified transform will be used to translate (x, y) into
display coordinates.
*inline*:
controls whether the underlying contour is removed or
not. Default is *True*.
*inline_spacing*:
space in pixels to leave on each side of label when
placing inline. Defaults to 5. This spacing will be
exact for labels at locations where the contour is
straight, less so for labels on curved contours.
"""
if transform is None:
transform = self.ax.transData
if transform:
x, y = transform.transform_point((x, y))
# find the nearest contour _in screen units_
conmin, segmin, imin, xmin, ymin = self.find_nearest_contour(
x, y, self.labelIndiceList)[:5]
# The calc_label_rot_and_inline routine requires that (xmin,ymin)
# be a vertex in the path. So, if it isn't, add a vertex here
# grab the paths from the collections
paths = self.collections[conmin].get_paths()
# grab the correct segment
active_path = paths[segmin]
        # grab its vertices
lc = active_path.vertices
# sort out where the new vertex should be added data-units
xcmin = self.ax.transData.inverted().transform_point([xmin, ymin])
# if there isn't a vertex close enough
if not np.allclose(xcmin, lc[imin]):
# insert new data into the vertex list
lc = np.r_[lc[:imin], np.array(xcmin)[None, :], lc[imin:]]
# replace the path with the new one
paths[segmin] = mpath.Path(lc)
# Get index of nearest level in subset of levels used for labeling
lmin = self.labelIndiceList.index(conmin)
# Coordinates of contour
paths = self.collections[conmin].get_paths()
lc = paths[segmin].vertices
# In pixel/screen space
slc = self.ax.transData.transform(lc)
# Get label width for rotating labels and breaking contours
lw = self.get_label_width(self.labelLevelList[lmin],
self.labelFmt, self.labelFontSizeList[lmin])
# Figure out label rotation.
if inline:
lcarg = lc
else:
lcarg = None
rotation, nlc = self.calc_label_rot_and_inline(
slc, imin, lw, lcarg,
inline_spacing)
self.add_label(xmin, ymin, rotation, self.labelLevelList[lmin],
self.labelCValueList[lmin])
if inline:
# Remove old, not looping over paths so we can do this up front
paths.pop(segmin)
# Add paths if not empty or single point
for n in nlc:
if len(n) > 1:
paths.append(mpath.Path(n))
def pop_label(self, index=-1):
"""Defaults to removing last label, but any index can be supplied"""
self.labelCValues.pop(index)
t = self.labelTexts.pop(index)
t.remove()
def labels(self, inline, inline_spacing):
if self._use_clabeltext:
add_label = self.add_label_clabeltext
else:
add_label = self.add_label
for icon, lev, fsize, cvalue in zip(
self.labelIndiceList, self.labelLevelList,
self.labelFontSizeList, self.labelCValueList):
con = self.collections[icon]
trans = con.get_transform()
lw = self.get_label_width(lev, self.labelFmt, fsize)
lw *= self.ax.figure.dpi / 72.0 # scale to screen coordinates
additions = []
paths = con.get_paths()
for segNum, linepath in enumerate(paths):
lc = linepath.vertices # Line contour
slc0 = trans.transform(lc) # Line contour in screen coords
# For closed polygons, add extra point to avoid division by
# zero in print_label and locate_label. Other than these
# functions, this is not necessary and should probably be
# eventually removed.
if mlab.is_closed_polygon(lc):
slc = np.r_[slc0, slc0[1:2, :]]
else:
slc = slc0
# Check if long enough for a label
if self.print_label(slc, lw):
x, y, ind = self.locate_label(slc, lw)
if inline:
lcarg = lc
else:
lcarg = None
rotation, new = self.calc_label_rot_and_inline(
slc0, ind, lw, lcarg,
inline_spacing)
# Actually add the label
add_label(x, y, rotation, lev, cvalue)
# If inline, add new contours
if inline:
for n in new:
# Add path if not empty or single point
if len(n) > 1:
additions.append(mpath.Path(n))
else: # If not adding label, keep old path
additions.append(linepath)
# After looping over all segments on a contour, remove old
# paths and add new ones if inlining
if inline:
del paths[:]
paths.extend(additions)
def _find_closest_point_on_leg(p1, p2, p0):
    """Find the closest point to p0 on the line segment connecting p1 and p2;
    return (squared distance, closest point)."""
# handle degenerate case
if np.all(p2 == p1):
d = np.sum((p0 - p1)**2)
return d, p1
d21 = p2 - p1
d01 = p0 - p1
# project on to line segment to find closest point
proj = np.dot(d01, d21) / np.dot(d21, d21)
if proj < 0:
proj = 0
if proj > 1:
proj = 1
pc = p1 + proj * d21
# find squared distance
d = np.sum((pc-p0)**2)
return d, pc
def _find_closest_point_on_path(lc, point):
"""
    lc: coordinates of vertices
    point: coordinates of test point

    Returns (dmin, xcmin, legmin): the squared distance to the closest
    point on the path, the closest point itself, and the vertex-index
    pair of the leg on which it lies.
"""
# find index of closest vertex for this segment
ds = np.sum((lc - point[None, :])**2, 1)
imin = np.argmin(ds)
dmin = np.inf
xcmin = None
legmin = (None, None)
closed = mlab.is_closed_polygon(lc)
# build list of legs before and after this vertex
legs = []
if imin > 0 or closed:
legs.append(((imin-1) % len(lc), imin))
if imin < len(lc) - 1 or closed:
legs.append((imin, (imin+1) % len(lc)))
for leg in legs:
d, xc = _find_closest_point_on_leg(lc[leg[0]], lc[leg[1]], point)
if d < dmin:
dmin = d
xcmin = xc
legmin = leg
return (dmin, xcmin, legmin)
class ContourSet(cm.ScalarMappable, ContourLabeler):
"""
Store a set of contour lines or filled regions.
User-callable method: clabel
Useful attributes:
ax:
The axes object in which the contours are drawn
collections:
a silent_list of LineCollections or PolyCollections
levels:
contour levels
layers:
same as levels for line contours; half-way between
levels for filled contours. See :meth:`_process_colors`.
"""
def __init__(self, ax, *args, **kwargs):
"""
Draw contour lines or filled regions, depending on
whether keyword arg 'filled' is *False* (default) or *True*.
The first three arguments must be:
*ax*: axes object.
*levels*: [level0, level1, ..., leveln]
A list of floating point numbers indicating the contour
levels.
*allsegs*: [level0segs, level1segs, ...]
List of all the polygon segments for all the *levels*.
For contour lines ``len(allsegs) == len(levels)``, and for
filled contour regions ``len(allsegs) = len(levels)-1``.
level0segs = [polygon0, polygon1, ...]
polygon0 = array_like [[x0,y0], [x1,y1], ...]
*allkinds*: *None* or [level0kinds, level1kinds, ...]
Optional list of all the polygon vertex kinds (code types), as
described and used in Path. This is used to allow multiply-
connected paths such as holes within filled polygons.
If not *None*, len(allkinds) == len(allsegs).
level0kinds = [polygon0kinds, ...]
polygon0kinds = [vertexcode0, vertexcode1, ...]
If *allkinds* is not *None*, usually all polygons for a particular
contour level are grouped together so that
level0segs = [polygon0] and level0kinds = [polygon0kinds].
Keyword arguments are as described in
:attr:`matplotlib.contour.QuadContourSet.contour_doc`.
**Examples:**
.. plot:: mpl_examples/misc/contour_manual.py
"""
self.ax = ax
self.levels = kwargs.get('levels', None)
self.filled = kwargs.get('filled', False)
self.linewidths = kwargs.get('linewidths', None)
self.linestyles = kwargs.get('linestyles', None)
self.hatches = kwargs.get('hatches', [None])
self.alpha = kwargs.get('alpha', None)
self.origin = kwargs.get('origin', None)
self.extent = kwargs.get('extent', None)
cmap = kwargs.get('cmap', None)
self.colors = kwargs.get('colors', None)
norm = kwargs.get('norm', None)
vmin = kwargs.get('vmin', None)
vmax = kwargs.get('vmax', None)
self.extend = kwargs.get('extend', 'neither')
self.antialiased = kwargs.get('antialiased', None)
if self.antialiased is None and self.filled:
self.antialiased = False # eliminate artifacts; we are not
# stroking the boundaries.
# The default for line contours will be taken from
# the LineCollection default, which uses the
# rcParams['lines.antialiased']
self.nchunk = kwargs.get('nchunk', 0)
self.locator = kwargs.get('locator', None)
if (isinstance(norm, colors.LogNorm)
or isinstance(self.locator, ticker.LogLocator)):
self.logscale = True
if norm is None:
norm = colors.LogNorm()
            if self.extend != 'neither':
                raise ValueError('extend kwarg does not work yet with log '
                                 'scale')
else:
self.logscale = False
if self.origin not in [None, 'lower', 'upper', 'image']:
raise ValueError("If given, *origin* must be one of [ 'lower' |"
" 'upper' | 'image']")
if self.extent is not None and len(self.extent) != 4:
raise ValueError("If given, *extent* must be '[ *None* |"
" (x0,x1,y0,y1) ]'")
if self.colors is not None and cmap is not None:
raise ValueError('Either colors or cmap must be None')
if self.origin == 'image':
self.origin = mpl.rcParams['image.origin']
self._transform = kwargs.get('transform', None)
self._process_args(*args, **kwargs)
self._process_levels()
if self.colors is not None:
ncolors = len(self.levels)
if self.filled:
ncolors -= 1
i0 = 0
# Handle the case where colors are given for the extended
# parts of the contour.
extend_min = self.extend in ['min', 'both']
extend_max = self.extend in ['max', 'both']
use_set_under_over = False
# if we are extending the lower end, and we've been given enough
# colors then skip the first color in the resulting cmap. For the
# extend_max case we don't need to worry about passing more colors
# than ncolors as ListedColormap will clip.
total_levels = ncolors + int(extend_min) + int(extend_max)
if (len(self.colors) == total_levels and
any([extend_min, extend_max])):
use_set_under_over = True
if extend_min:
i0 = 1
cmap = colors.ListedColormap(self.colors[i0:None], N=ncolors)
if use_set_under_over:
if extend_min:
cmap.set_under(self.colors[0])
if extend_max:
cmap.set_over(self.colors[-1])
if self.filled:
self.collections = cbook.silent_list('mcoll.PathCollection')
else:
self.collections = cbook.silent_list('mcoll.LineCollection')
# label lists must be initialized here
self.labelTexts = []
self.labelCValues = []
kw = {'cmap': cmap}
if norm is not None:
kw['norm'] = norm
# sets self.cmap, norm if needed;
cm.ScalarMappable.__init__(self, **kw)
if vmin is not None:
self.norm.vmin = vmin
if vmax is not None:
self.norm.vmax = vmax
self._process_colors()
self.allsegs, self.allkinds = self._get_allsegs_and_allkinds()
if self.filled:
if self.linewidths is not None:
warnings.warn('linewidths is ignored by contourf')
# Lower and upper contour levels.
lowers, uppers = self._get_lowers_and_uppers()
# Ensure allkinds can be zipped below.
if self.allkinds is None:
self.allkinds = [None] * len(self.allsegs)
for level, level_upper, segs, kinds in \
zip(lowers, uppers, self.allsegs, self.allkinds):
paths = self._make_paths(segs, kinds)
# Default zorder taken from Collection
zorder = kwargs.get('zorder', 1)
col = mcoll.PathCollection(
paths,
antialiaseds=(self.antialiased,),
edgecolors='none',
alpha=self.alpha,
transform=self.get_transform(),
zorder=zorder)
self.ax.add_collection(col, autolim=False)
self.collections.append(col)
else:
tlinewidths = self._process_linewidths()
self.tlinewidths = tlinewidths
tlinestyles = self._process_linestyles()
aa = self.antialiased
if aa is not None:
aa = (self.antialiased,)
for level, width, lstyle, segs in \
zip(self.levels, tlinewidths, tlinestyles, self.allsegs):
# Default zorder taken from LineCollection
zorder = kwargs.get('zorder', 2)
col = mcoll.LineCollection(
segs,
antialiaseds=aa,
linewidths=width,
linestyles=[lstyle],
alpha=self.alpha,
transform=self.get_transform(),
zorder=zorder)
col.set_label('_nolegend_')
self.ax.add_collection(col, autolim=False)
self.collections.append(col)
for col in self.collections:
col.sticky_edges.x[:] = [self._mins[0], self._maxs[0]]
col.sticky_edges.y[:] = [self._mins[1], self._maxs[1]]
self.ax.update_datalim([self._mins, self._maxs])
self.ax.autoscale_view(tight=True)
self.changed() # set the colors
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform`
instance used by this ContourSet.
"""
if self._transform is None:
self._transform = self.ax.transData
elif (not isinstance(self._transform, mtrans.Transform)
and hasattr(self._transform, '_as_mpl_transform')):
self._transform = self._transform._as_mpl_transform(self.ax)
return self._transform
def __getstate__(self):
state = self.__dict__.copy()
# the C object _contour_generator cannot currently be pickled. This
# isn't a big issue as it is not actually used once the contour has
# been calculated.
state['_contour_generator'] = None
return state
def legend_elements(self, variable_name='x', str_format=str):
"""
Return a list of artist and labels suitable for passing through
to :func:`plt.legend` which represent this ContourSet.
Args:
*variable_name*: the string used inside the inequality used
on the labels
*str_format*: function used to format the numbers in the labels
"""
artists = []
labels = []
if self.filled:
lowers, uppers = self._get_lowers_and_uppers()
n_levels = len(self.collections)
for i, (collection, lower, upper) in enumerate(
zip(self.collections, lowers, uppers)):
patch = mpatches.Rectangle(
(0, 0), 1, 1,
facecolor=collection.get_facecolor()[0],
hatch=collection.get_hatch(),
alpha=collection.get_alpha())
artists.append(patch)
lower = str_format(lower)
upper = str_format(upper)
if i == 0 and self.extend in ('min', 'both'):
labels.append(r'$%s \leq %s$' % (variable_name,
lower))
elif i == n_levels - 1 and self.extend in ('max', 'both'):
labels.append(r'$%s > %s$' % (variable_name,
upper))
else:
labels.append(r'$%s < %s \leq %s$' % (lower,
variable_name,
upper))
else:
for collection, level in zip(self.collections, self.levels):
patch = mcoll.LineCollection(None)
patch.update_from(collection)
artists.append(patch)
# format the level for insertion into the labels
level = str_format(level)
labels.append(r'$%s = %s$' % (variable_name, level))
return artists, labels
def _process_args(self, *args, **kwargs):
"""
Process *args* and *kwargs*; override in derived classes.
Must set self.levels, self.zmin and self.zmax, and update axes
limits.
"""
self.levels = args[0]
self.allsegs = args[1]
self.allkinds = len(args) > 2 and args[2] or None
self.zmax = np.amax(self.levels)
self.zmin = np.amin(self.levels)
self._auto = False
# Check lengths of levels and allsegs.
if self.filled:
if len(self.allsegs) != len(self.levels) - 1:
raise ValueError('must be one less number of segments as '
'levels')
else:
if len(self.allsegs) != len(self.levels):
raise ValueError('must be same number of segments as levels')
# Check length of allkinds.
if (self.allkinds is not None and
len(self.allkinds) != len(self.allsegs)):
raise ValueError('allkinds has different length to allsegs')
# Determine x,y bounds and update axes data limits.
flatseglist = [s for seg in self.allsegs for s in seg]
points = np.concatenate(flatseglist, axis=0)
self._mins = points.min(axis=0)
self._maxs = points.max(axis=0)
def _get_allsegs_and_allkinds(self):
"""
Override in derived classes to create and return allsegs and allkinds.
allkinds can be None.
"""
return self.allsegs, self.allkinds
def _get_lowers_and_uppers(self):
"""
Return (lowers,uppers) for filled contours.
"""
lowers = self._levels[:-1]
if self.zmin == lowers[0]:
# Include minimum values in lowest interval
lowers = lowers.copy() # so we don't change self._levels
if self.logscale:
lowers[0] = 0.99 * self.zmin
else:
lowers[0] -= 1
uppers = self._levels[1:]
return (lowers, uppers)
def _make_paths(self, segs, kinds):
if kinds is not None:
return [mpath.Path(seg, codes=kind)
for seg, kind in zip(segs, kinds)]
else:
return [mpath.Path(seg) for seg in segs]
def changed(self):
tcolors = [(tuple(rgba),)
for rgba in self.to_rgba(self.cvalues, alpha=self.alpha)]
self.tcolors = tcolors
hatches = self.hatches * len(tcolors)
for color, hatch, collection in zip(tcolors, hatches,
self.collections):
if self.filled:
collection.set_facecolor(color)
# update the collection's hatch (may be None)
collection.set_hatch(hatch)
else:
collection.set_color(color)
for label, cv in zip(self.labelTexts, self.labelCValues):
label.set_alpha(self.alpha)
label.set_color(self.labelMappable.to_rgba(cv))
# add label colors
cm.ScalarMappable.changed(self)
def _autolev(self, N):
"""
Select contour levels to span the data.
We need two more levels for filled contours than for
line contours, because for the latter we need to specify
the lower and upper boundary of each range. For example,
a single contour boundary, say at z = 0, requires only
one contour line, but two filled regions, and therefore
three levels to provide boundaries for both regions.
"""
if self.locator is None:
if self.logscale:
self.locator = ticker.LogLocator()
else:
self.locator = ticker.MaxNLocator(N + 1, min_n_ticks=1)
zmax = self.zmax
zmin = self.zmin
lev = self.locator.tick_values(zmin, zmax)
self._auto = True
if self.filled:
return lev
# For line contours, drop levels outside the data range.
return lev[(lev > zmin) & (lev < zmax)]
def _contour_level_args(self, z, args):
"""
Determine the contour levels and store in self.levels.
"""
if self.filled:
fn = 'contourf'
else:
fn = 'contour'
self._auto = False
if self.levels is None:
if len(args) == 0:
lev = self._autolev(7)
else:
level_arg = args[0]
try:
if type(level_arg) == int:
lev = self._autolev(level_arg)
else:
lev = np.asarray(level_arg).astype(np.float64)
                except (TypeError, ValueError):
raise TypeError(
"Last %s arg must give levels; see help(%s)" %
(fn, fn))
self.levels = lev
if self.filled and len(self.levels) < 2:
raise ValueError("Filled contours require at least 2 levels.")
if len(self.levels) > 1 and np.amin(np.diff(self.levels)) <= 0.0:
if hasattr(self, '_corner_mask') and self._corner_mask == 'legacy':
warnings.warn("Contour levels are not increasing")
else:
raise ValueError("Contour levels must be increasing")
def _process_levels(self):
"""
Assign values to :attr:`layers` based on :attr:`levels`,
adding extended layers as needed if contours are filled.
For line contours, layers simply coincide with levels;
a line is a thin layer. No extended levels are needed
with line contours.
"""
# The following attributes are no longer needed, and
# should be deprecated and removed to reduce confusion.
self.vmin = np.amin(self.levels)
self.vmax = np.amax(self.levels)
# Make a private _levels to include extended regions; we
# want to leave the original levels attribute unchanged.
# (Colorbar needs this even for line contours.)
self._levels = list(self.levels)
if self.extend in ('both', 'min'):
self._levels.insert(0, min(self.levels[0], self.zmin) - 1)
if self.extend in ('both', 'max'):
self._levels.append(max(self.levels[-1], self.zmax) + 1)
self._levels = np.asarray(self._levels)
if not self.filled:
self.layers = self.levels
return
# layer values are mid-way between levels
self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
# ...except that extended layers must be outside the
# normed range:
if self.extend in ('both', 'min'):
self.layers[0] = -np.inf
if self.extend in ('both', 'max'):
self.layers[-1] = np.inf
def _process_colors(self):
"""
Color argument processing for contouring.
Note that we base the color mapping on the contour levels
and layers, not on the actual range of the Z values. This
means we don't have to worry about bad values in Z, and we
always have the full dynamic range available for the selected
levels.
The color is based on the midpoint of the layer, except for
extended end layers. By default, the norm vmin and vmax
are the extreme values of the non-extended levels. Hence,
the layer color extremes are not the extreme values of
the colormap itself, but approach those values as the number
of levels increases. An advantage of this scheme is that
line contours, when added to filled contours, take on
colors that are consistent with those of the filled regions;
for example, a contour line on the boundary between two
regions will have a color intermediate between those
of the regions.
"""
self.monochrome = self.cmap.monochrome
if self.colors is not None:
# Generate integers for direct indexing.
i0, i1 = 0, len(self.levels)
if self.filled:
i1 -= 1
# Out of range indices for over and under:
if self.extend in ('both', 'min'):
i0 = -1
if self.extend in ('both', 'max'):
i1 += 1
self.cvalues = list(range(i0, i1))
self.set_norm(colors.NoNorm())
else:
self.cvalues = self.layers
self.set_array(self.levels)
self.autoscale_None()
if self.extend in ('both', 'max', 'min'):
self.norm.clip = False
# self.tcolors are set by the "changed" method
def _process_linewidths(self):
linewidths = self.linewidths
Nlev = len(self.levels)
if linewidths is None:
tlinewidths = [(mpl.rcParams['lines.linewidth'],)] * Nlev
else:
if not cbook.iterable(linewidths):
linewidths = [linewidths] * Nlev
else:
linewidths = list(linewidths)
if len(linewidths) < Nlev:
nreps = int(np.ceil(Nlev / len(linewidths)))
linewidths = linewidths * nreps
if len(linewidths) > Nlev:
linewidths = linewidths[:Nlev]
tlinewidths = [(w,) for w in linewidths]
return tlinewidths
def _process_linestyles(self):
linestyles = self.linestyles
Nlev = len(self.levels)
if linestyles is None:
tlinestyles = ['solid'] * Nlev
if self.monochrome:
neg_ls = mpl.rcParams['contour.negative_linestyle']
eps = - (self.zmax - self.zmin) * 1e-15
for i, lev in enumerate(self.levels):
if lev < eps:
tlinestyles[i] = neg_ls
else:
if cbook.is_string_like(linestyles):
tlinestyles = [linestyles] * Nlev
elif cbook.iterable(linestyles):
tlinestyles = list(linestyles)
if len(tlinestyles) < Nlev:
nreps = int(np.ceil(Nlev / len(linestyles)))
tlinestyles = tlinestyles * nreps
if len(tlinestyles) > Nlev:
tlinestyles = tlinestyles[:Nlev]
else:
raise ValueError("Unrecognized type for linestyles kwarg")
return tlinestyles
def get_alpha(self):
"""returns alpha to be applied to all ContourSet artists"""
return self.alpha
def set_alpha(self, alpha):
"""sets alpha for all ContourSet artists"""
self.alpha = alpha
self.changed()
def find_nearest_contour(self, x, y, indices=None, pixel=True):
"""
Finds contour that is closest to a point. Defaults to
measuring distance in pixels (screen space - useful for manual
contour labeling), but this can be controlled via a keyword
argument.
Returns a tuple containing the contour, segment, index of
segment, x & y of segment point and distance to minimum point.
Optional keyword arguments:
*indices*:
Indexes of contour levels to consider when looking for
nearest point. Defaults to using all levels.
*pixel*:
If *True*, measure distance in pixel space, if not, measure
distance in axes space. Defaults to *True*.
"""
# This function uses a method that is probably quite
# inefficient based on converting each contour segment to
# pixel coordinates and then comparing the given point to
# those coordinates for each contour. This will probably be
# quite slow for complex contours, but for normal use it works
# sufficiently well that the time is not noticeable.
# Nonetheless, improvements could probably be made.
if indices is None:
indices = list(xrange(len(self.levels)))
dmin = np.inf
conmin = None
segmin = None
xmin = None
ymin = None
point = np.array([x, y])
for icon in indices:
con = self.collections[icon]
trans = con.get_transform()
paths = con.get_paths()
for segNum, linepath in enumerate(paths):
lc = linepath.vertices
# transfer all data points to screen coordinates if desired
if pixel:
lc = trans.transform(lc)
d, xc, leg = _find_closest_point_on_path(lc, point)
if d < dmin:
dmin = d
conmin = icon
segmin = segNum
imin = leg[1]
xmin = xc[0]
ymin = xc[1]
return (conmin, segmin, imin, xmin, ymin, dmin)
class QuadContourSet(ContourSet):
"""
Create and store a set of contour lines or filled regions.
User-callable method: :meth:`clabel`
Useful attributes:
ax:
The axes object in which the contours are drawn
collections:
A silent_list of LineCollections or PolyCollections
levels:
Contour levels
layers:
Same as levels for line contours; half-way between
levels for filled contours. See :meth:`_process_colors` method.
"""
def _process_args(self, *args, **kwargs):
"""
Process args and kwargs.
"""
if isinstance(args[0], QuadContourSet):
if self.levels is None:
self.levels = args[0].levels
self.zmin = args[0].zmin
self.zmax = args[0].zmax
self._corner_mask = args[0]._corner_mask
if self._corner_mask == 'legacy':
contour_generator = args[0].Cntr
else:
contour_generator = args[0]._contour_generator
self._mins = args[0]._mins
self._maxs = args[0]._maxs
else:
self._corner_mask = kwargs.get('corner_mask', None)
if self._corner_mask is None:
self._corner_mask = mpl.rcParams['contour.corner_mask']
x, y, z = self._contour_args(args, kwargs)
_mask = ma.getmask(z)
if _mask is ma.nomask or not _mask.any():
_mask = None
if self._corner_mask == 'legacy':
cbook.warn_deprecated('1.5',
name="corner_mask='legacy'",
alternative='corner_mask=False or True')
contour_generator = _cntr.Cntr(x, y, z.filled(), _mask)
else:
contour_generator = _contour.QuadContourGenerator(
x, y, z.filled(), _mask, self._corner_mask, self.nchunk)
t = self.get_transform()
# if the transform is not trans data, and some part of it
# contains transData, transform the xs and ys to data coordinates
if (t != self.ax.transData and
any(t.contains_branch_seperately(self.ax.transData))):
trans_to_data = t - self.ax.transData
pts = (np.vstack([x.flat, y.flat]).T)
transformed_pts = trans_to_data.transform(pts)
x = transformed_pts[..., 0]
y = transformed_pts[..., 1]
self._mins = [ma.min(x), ma.min(y)]
self._maxs = [ma.max(x), ma.max(y)]
if self._corner_mask == 'legacy':
self.Cntr = contour_generator
else:
self._contour_generator = contour_generator
def _get_allsegs_and_allkinds(self):
"""
Create and return allsegs and allkinds by calling underlying C code.
"""
allsegs = []
if self.filled:
lowers, uppers = self._get_lowers_and_uppers()
allkinds = []
for level, level_upper in zip(lowers, uppers):
if self._corner_mask == 'legacy':
nlist = self.Cntr.trace(level, level_upper,
nchunk=self.nchunk)
nseg = len(nlist) // 2
vertices = nlist[:nseg]
kinds = nlist[nseg:]
else:
vertices, kinds = \
self._contour_generator.create_filled_contour(
level, level_upper)
allsegs.append(vertices)
allkinds.append(kinds)
else:
allkinds = None
for level in self.levels:
if self._corner_mask == 'legacy':
nlist = self.Cntr.trace(level)
nseg = len(nlist) // 2
vertices = nlist[:nseg]
else:
vertices = self._contour_generator.create_contour(level)
allsegs.append(vertices)
return allsegs, allkinds
def _contour_args(self, args, kwargs):
if self.filled:
fn = 'contourf'
else:
fn = 'contour'
Nargs = len(args)
if Nargs <= 2:
z = ma.asarray(args[0], dtype=np.float64)
x, y = self._initialize_x_y(z)
args = args[1:]
elif Nargs <= 4:
x, y, z = self._check_xyz(args[:3], kwargs)
args = args[3:]
else:
raise TypeError("Too many arguments to %s; see help(%s)" %
(fn, fn))
z = ma.masked_invalid(z, copy=False)
self.zmax = float(z.max())
self.zmin = float(z.min())
if self.logscale and self.zmin <= 0:
z = ma.masked_where(z <= 0, z)
warnings.warn('Log scale: values of z <= 0 have been masked')
self.zmin = float(z.min())
self._contour_level_args(z, args)
return (x, y, z)
def _check_xyz(self, args, kwargs):
"""
For functions like contour, check that the dimensions
of the input arrays match; if x and y are 1D, convert
them to 2D using meshgrid.
Possible change: I think we should make and use an ArgumentError
Exception class (here and elsewhere).
"""
x, y = args[:2]
self.ax._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x = self.ax.convert_xunits(x)
y = self.ax.convert_yunits(y)
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
z = ma.asarray(args[2], dtype=np.float64)
if z.ndim != 2:
raise TypeError("Input z must be a 2D array.")
else:
Ny, Nx = z.shape
if x.ndim != y.ndim:
raise TypeError("Number of dimensions of x and y should match.")
if x.ndim == 1:
nx, = x.shape
ny, = y.shape
if nx != Nx:
raise TypeError("Length of x must be number of columns in z.")
if ny != Ny:
raise TypeError("Length of y must be number of rows in z.")
x, y = np.meshgrid(x, y)
elif x.ndim == 2:
if x.shape != z.shape:
raise TypeError("Shape of x does not match that of z: found "
"{0} instead of {1}.".format(x.shape, z.shape))
if y.shape != z.shape:
raise TypeError("Shape of y does not match that of z: found "
"{0} instead of {1}.".format(y.shape, z.shape))
else:
raise TypeError("Inputs x and y must be 1D or 2D.")
return x, y, z
def _initialize_x_y(self, z):
"""
Return X, Y arrays such that contour(Z) will match imshow(Z)
if origin is not None.
The center of pixel Z[i,j] depends on origin:
if origin is None, x = j, y = i;
if origin is 'lower', x = j + 0.5, y = i + 0.5;
if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5
If extent is not None, x and y will be scaled to match,
as in imshow.
If origin is None and extent is not None, then extent
will give the minimum and maximum values of x and y.
"""
if z.ndim != 2:
raise TypeError("Input must be a 2D array.")
else:
Ny, Nx = z.shape
if self.origin is None: # Not for image-matching.
if self.extent is None:
return np.meshgrid(np.arange(Nx), np.arange(Ny))
else:
x0, x1, y0, y1 = self.extent
x = np.linspace(x0, x1, Nx)
y = np.linspace(y0, y1, Ny)
return np.meshgrid(x, y)
# Match image behavior:
if self.extent is None:
x0, x1, y0, y1 = (0, Nx, 0, Ny)
else:
x0, x1, y0, y1 = self.extent
dx = float(x1 - x0) / Nx
dy = float(y1 - y0) / Ny
x = x0 + (np.arange(Nx) + 0.5) * dx
y = y0 + (np.arange(Ny) + 0.5) * dy
if self.origin == 'upper':
y = y[::-1]
return np.meshgrid(x, y)
contour_doc = """
Plot contours.
:func:`~matplotlib.pyplot.contour` and
:func:`~matplotlib.pyplot.contourf` draw contour lines and
filled contours, respectively. Except as noted, function
signatures and return values are the same for both versions.
:func:`~matplotlib.pyplot.contourf` differs from the MATLAB
version in that it does not draw the polygon edges.
To draw edges, add line contours with
calls to :func:`~matplotlib.pyplot.contour`.
Call signatures::
contour(Z)
make a contour plot of an array *Z*. The level values are chosen
automatically.
::
contour(X,Y,Z)
*X*, *Y* specify the (x, y) coordinates of the surface
::
contour(Z,N)
contour(X,Y,Z,N)
contour up to *N* automatically-chosen levels.
::
contour(Z,V)
contour(X,Y,Z,V)
draw contour lines at the values specified in sequence *V*,
which must be in increasing order.
::
contourf(..., V)
fill the ``len(V)-1`` regions between the values in *V*,
which must be in increasing order.
::
contour(Z, **kwargs)
Use keyword args to control colors, linewidth, origin, cmap ... see
below for more details.
*X* and *Y* must both be 2-D with the same shape as *Z*, or they
must both be 1-D such that ``len(X)`` is the number of columns in
*Z* and ``len(Y)`` is the number of rows in *Z*.
``C = contour(...)`` returns a
:class:`~matplotlib.contour.QuadContourSet` object.
Optional keyword arguments:
*corner_mask*: [ *True* | *False* | 'legacy' ]
Enable/disable corner masking, which only has an effect if *Z* is
a masked array. If *False*, any quad touching a masked point is
masked out. If *True*, only the triangular corners of quads
nearest those points are always masked out, other triangular
corners comprising three unmasked points are contoured as usual.
If 'legacy', the old contouring algorithm is used, which is
equivalent to *False* and is deprecated, only remaining whilst the
new algorithm is tested fully.
If not specified, the default is taken from
rcParams['contour.corner_mask'], which is True unless it has
been modified.
*colors*: [ *None* | string | (mpl_colors) ]
If *None*, the colormap specified by cmap will be used.
If a string, like 'r' or 'red', all levels will be plotted in this
color.
If a tuple of matplotlib color args (string, float, rgb, etc),
different levels will be plotted in different colors in the order
specified.
*alpha*: float
The alpha blending value
*cmap*: [ *None* | Colormap ]
A cm :class:`~matplotlib.colors.Colormap` instance or
*None*. If *cmap* is *None* and *colors* is *None*, a
default Colormap is used.
*norm*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance for
scaling data values to colors. If *norm* is *None* and
*colors* is *None*, the default linear scaling is used.
*vmin*, *vmax*: [ *None* | scalar ]
If not *None*, either or both of these values will be
supplied to the :class:`matplotlib.colors.Normalize`
instance, overriding the default color scaling based on
*levels*.
*levels*: [level0, level1, ..., leveln]
A list of floating point numbers indicating the level
curves to draw, in increasing order; e.g., to draw just
the zero contour pass ``levels=[0]``
*origin*: [ *None* | 'upper' | 'lower' | 'image' ]
If *None*, the first value of *Z* will correspond to the
lower left corner, location (0,0). If 'image', the rc
value for ``image.origin`` will be used.
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*extent*: [ *None* | (x0,x1,y0,y1) ]
If *origin* is not *None*, then *extent* is interpreted as
in :func:`matplotlib.pyplot.imshow`: it gives the outer
pixel boundaries. In this case, the position of Z[0,0]
is the center of the pixel, not a corner. If *origin* is
*None*, then (*x0*, *y0*) is the position of Z[0,0], and
(*x1*, *y1*) is the position of Z[-1,-1].
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*locator*: [ *None* | ticker.Locator subclass ]
If *locator* is *None*, the default
:class:`~matplotlib.ticker.MaxNLocator` is used. The
locator is used to determine the contour levels if they
are not given explicitly via the *V* argument.
*extend*: [ 'neither' | 'both' | 'min' | 'max' ]
Unless this is 'neither', contour levels are automatically
added to one or both ends of the range so that all data
are included. These added ranges are then mapped to the
special colormap values which default to the ends of the
colormap range, but can be set via
:meth:`matplotlib.colors.Colormap.set_under` and
:meth:`matplotlib.colors.Colormap.set_over` methods.
*xunits*, *yunits*: [ *None* | registered units ]
Override axis units by specifying an instance of a
:class:`matplotlib.units.ConversionInterface`.
*antialiased*: [ *True* | *False* ]
enable antialiasing, overriding the defaults. For
filled contours, the default is *True*. For line contours,
it is taken from rcParams['lines.antialiased'].
*nchunk*: [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive integer to
divide the domain into subdomains of *nchunk* by *nchunk* quads.
Chunking reduces the maximum length of polygons generated by the
contouring algorithm which reduces the rendering workload passed
on to the backend and also requires slightly less RAM. It can
however introduce rendering artifacts at chunk boundaries depending
on the backend, the *antialiased* flag and value of *alpha*.
contour-only keyword arguments:
*linewidths*: [ *None* | number | tuple of numbers ]
If *linewidths* is *None*, the default width in
``lines.linewidth`` in ``matplotlibrc`` is used.
If a number, all levels will be plotted with this linewidth.
If a tuple, different levels will be plotted with different
linewidths in the order specified.
*linestyles*: [ *None* | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
If *linestyles* is *None*, the default is 'solid' unless
the lines are monochrome. In that case, negative
contours will take their linestyle from the ``matplotlibrc``
``contour.negative_linestyle`` setting.
*linestyles* can also be an iterable of the above strings
specifying a set of linestyles to be used. If this
iterable is shorter than the number of contour levels
it will be repeated as necessary.
contourf-only keyword arguments:
*hatches*:
A list of cross hatch patterns to use on the filled areas.
If None, no hatching will be added to the contour.
Hatching is supported in the PostScript, PDF, SVG and Agg
backends only.
Note: contourf fills intervals that are closed at the top; that
is, for boundaries *z1* and *z2*, the filled region is::
z1 < z <= z2
There is one exception: if the lowest boundary coincides with
the minimum value of the *z* array, then that minimum value
will be included in the lowest interval.
**Examples:**
.. plot:: mpl_examples/pylab_examples/contour_demo.py
.. plot:: mpl_examples/pylab_examples/contourf_demo.py
.. plot:: mpl_examples/pylab_examples/contour_corner_mask.py
"""
| gpl-3.0 |
DonghoChoi/ISB_Project | local/search_log_analysis_dyad.py | 2 | 18543 | #!/usr/bin/python
# Author: Dongho Choi
import os.path
import datetime
import time
import sys
import itertools
import pandas as pd
import numpy as np
from sshtunnel import SSHTunnelForwarder # for SSH connection
import pymysql.cursors # MySQL handling API
sys.path.append("./configs/")
import server_config # (1) info2_server (2) exploration_db
# Global Variables
dwellTime_cut = 30
participants_list = []
participants_combinations = []
'''
df_participants = pd.DataFrame()
df_pages_lab = pd.DataFrame()
df_queries = pd.DataFrame()
'''
df_query_second_task = pd.DataFrame(
columns=('userID', 'url', 'time', 'stageID', 'questionID', 'source', 'host', 'query', 'dwellTime'))
#df_visits_second_task = pd.DataFrame(columns=('userID', 'url', 'stageID', 'source', 'host', 'dwellTime'))
df_visits_second_task = pd.DataFrame()
list_universe_visits_second_task = []
list_relevent_universe_visits_second_task = []
def get_participants_list(arg_pd):
return arg_pd['userID']
def get_Coverage_Individual(user_a):
df_user_a_visits_second_task = df_visits_second_task.loc[df_visits_second_task['userID'] == user_a]
total_visits = df_user_a_visits_second_task['url'].drop_duplicates().values.tolist()
print("Coverage of {0}: {1}".format(user_a, len(total_visits)))
return len(total_visits)
def get_Unique_Coverage_Individual(user_a):
df_user_a_visits_second_task = df_visits_second_task.loc[df_visits_second_task['userID'] == user_a]
df_user_others_visits_second_task = df_visits_second_task.loc[~(df_visits_second_task['userID'] == user_a)]
set_user_a_visits_second_task = set(df_user_a_visits_second_task['url'].drop_duplicates().values)
set_user_others_visits_second_task = set(df_user_others_visits_second_task['url'].drop_duplicates().values)
set_unique_coverage = set_user_a_visits_second_task - set_user_others_visits_second_task
print("Unique Coverage of {0}: {1}".format(user_a, len(set_unique_coverage)))
return len(set_unique_coverage)
def get_Useful_Coverage_Individual(user_a):
df_user_a_visits_second_task = df_visits_second_task.loc[(df_visits_second_task['userID'] == user_a) & (df_visits_second_task['relevance'] == 1)]
total_visits = df_user_a_visits_second_task['url'].drop_duplicates().values.tolist()
print("Useful Coverage of {0}: {1}".format(user_a, len(total_visits)))
return len(total_visits)
def get_Unique_Useful_Coverage_Individual(user_a):
df_relevant_visits = df_visits_second_task.loc[df_visits_second_task['relevance']==1]
df_user_a_visits = df_relevant_visits.loc[(df_relevant_visits['userID'] == user_a)]
df_user_others_visits = df_relevant_visits.loc[~(df_relevant_visits['userID'] == user_a)]
set_user_a_visits = set(df_user_a_visits['url'].drop_duplicates().values)
set_user_others_visits = set(df_user_others_visits['url'].drop_duplicates().values)
set_unique_useful_coverage = set_user_a_visits - set_user_others_visits
print("Unique Useful Coverage of {0}: {1}".format(user_a, len(set_unique_useful_coverage)))
return len(set_unique_useful_coverage)
def get_Coverage_Dyad(user_a, user_b): # arg: both user_a and user_b are integer-type
df_user_a_visits_second_task = df_visits_second_task.loc[df_visits_second_task['userID'] == user_a]
df_user_b_visits_second_task = df_visits_second_task.loc[df_visits_second_task['userID'] == user_b]
total_visits = df_user_a_visits_second_task.append(df_user_b_visits_second_task)['url'].drop_duplicates().values.tolist()
#print(total_visits)
#total_visits = total_visits.drop_duplicates('url')
print("Coverage of {0} and {1}: {2}".format(user_a, user_b, len(total_visits)))
return len(total_visits)
def get_Useful_Coverage_Dyad(user_a, user_b): # arg: both user_a and user_b are integer-type
df_user_a_visits_second_task = df_visits_second_task.loc[(df_visits_second_task['userID'] == user_a) & (df_visits_second_task['relevance'] == 1)]
df_user_b_visits_second_task = df_visits_second_task.loc[(df_visits_second_task['userID'] == user_b) & (df_visits_second_task['relevance'] == 1)]
total_visits = df_user_a_visits_second_task.append(df_user_b_visits_second_task)['url'].drop_duplicates().values.tolist()
#total_visits = total_visits.drop_duplicates('url')
print("Useful Coverage of {0} and {1}: {2}".format(user_a, user_b, len(total_visits)))
return len(total_visits)
def get_Unique_Coverage_Dyad(user_a, user_b): # arg: both user_a and user_b are integer-type
df_user_a_b_visits_second_task = df_visits_second_task.loc[(df_visits_second_task['userID'] == user_a) | (df_visits_second_task['userID'] == user_b)]
df_user_others_visits_second_task = df_visits_second_task.loc[~((df_visits_second_task['userID'] == user_a) | (df_visits_second_task['userID'] == user_b))]
set_user_a_b_visits = set(df_user_a_b_visits_second_task['url'].drop_duplicates().values)
set_user_others_visits = set(df_user_others_visits_second_task['url'].drop_duplicates().values)
set_unique_Coverage = set_user_a_b_visits - set_user_others_visits
print("Unique Coverage of {0} and {1}: {2}".format(user_a, user_b, len(set_unique_Coverage)))
return len(set_unique_Coverage)
def get_Unique_Useful_Coverage_Dyad(user_a, user_b):
df_relevant_visits = df_visits_second_task.loc[df_visits_second_task['relevance']==1]
df_user_a_b_visits = df_relevant_visits.loc[(df_relevant_visits['userID'] == user_a) | (df_relevant_visits['userID'] == user_b)]
df_user_others_visits = df_relevant_visits.loc[~((df_relevant_visits['userID'] == user_a) | (df_relevant_visits['userID'] == user_b))]
set_user_a_b_visits = set(df_user_a_b_visits['url'].drop_duplicates().values)
set_user_others_visits = set(df_user_others_visits['url'].drop_duplicates().values)
set_unique_useful_coverage = set_user_a_b_visits - set_user_others_visits
print("Unique Useful Coverage of {0} and {1}: {2}".format(user_a, user_b, len(set_unique_useful_coverage)))
return len(set_unique_useful_coverage)
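# Relationship between the measures above (comment-only sketch): with A and B
# the URL sets visited by user_a and user_b, and O the union of every other
# user's set:
#   Coverage(a, b)             = |A ∪ B|
#   UniqueCoverage(a, b)       = |(A ∪ B) - O|
#   UsefulCoverage(a, b)       = |A ∪ B| over relevant pages only
#   UniqueUsefulCoverage(a, b) = |(A ∪ B) - O| over relevant pages only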
if __name__ == "__main__":
# READ DATA FROM SERVER
#read_Data_from_Server()
# Server connection
server = SSHTunnelForwarder(
(server_config.info2_server['host'], 22),
ssh_username=server_config.info2_server['user'],
ssh_password=server_config.info2_server['password'],
remote_bind_address=('127.0.0.1', 3306))
server.start()
connection = pymysql.connect(host='127.0.0.1',
port=server.local_bind_port,
user=server_config.exploration_db['user'],
password=server_config.exploration_db['password'],
db=server_config.exploration_db['database'])
connection.autocommit(True)
cursor = connection.cursor()
print("MySQL connection established.")
# Get the participants list from the table of 'final_participants'
df_participants = pd.read_sql('SELECT * FROM final_participants', con=connection)
print("Participants Table READ")
# Get the pages_lab table: importing all pages that include some pages related to study as well as new tab, etc.
#df_pages_lab = pd.read_sql("SELECT userID,url,localTime,stageID,questionID,source,host,query FROM pages_lab WHERE (userID!=5001 AND (source NOT LIKE 'peopleanalytics') AND (source NOT LIKE ''))", con=connection)
df_pages_lab = pd.read_sql("SELECT userID,url,time,stageID,questionID,source,host,query FROM pages_lab WHERE (userID!=5001)", con=connection)
print("Pages Table READ")
# Get the copy_data table
#df_copy_data = pd.read_sql('SELECT * FROM copy_data', con=connection)
# Get the queries table
df_queries = pd.read_sql('SELECT * FROM queries WHERE (userID!=5001)', con=connection)
# Get the demographic survey responses
#df_demographic = pd.read_sql('SELECT * FROM questionnaire_demographic', con=connection)
# Get the spatial capability score
#df_spatial = pd.read_sql('SELECT * FROM spatial_capability', con=connection)
#server.stop()
# READ AND FILL THE PARTICIPANTS LIST WITH COMBINATIONS
participants_list = df_participants['userID'].tolist()
num_participants = len(participants_list) # number of participants
print('number of participants:{0}'.format(num_participants))
participants_combinations = [list(x) for x in itertools.combinations(participants_list, 2)]
print("participants_combinations:", participants_combinations)
num_combinations = len(participants_combinations)
## CALCULATE DWELL TIME BETWEEN PAGES
df_pages_with_dwell_time = pd.DataFrame(columns=('userID','url','time','stageID','questionID','source','host','query','dwellTime'))
for i in range(0, num_participants):
#for i in range(0,3):
current_userID = participants_list[i]
df_temp_pages = df_pages_lab.loc[df_pages_lab['userID']==current_userID] # Data of current user
second_start = False
for j in range(0,len(df_temp_pages)-1):
if (df_temp_pages.iloc[j]['stageID'] == 41) and (second_start == False): # Marking the start time of the exploratory search session
startTime_second_task = df_temp_pages.iloc[j]['time']
print("start time of second task of user {0}: {1}".format(current_userID, startTime_second_task))
second_start = True
df_temp = df_temp_pages.iloc[j]
dwellTime = df_temp_pages.iloc[j+1]['time'] - df_temp_pages.iloc[j]['time']
df_temp['dwellTime'] = dwellTime.total_seconds()
df_pages_with_dwell_time = df_pages_with_dwell_time.append(df_temp)
df_temp = df_temp_pages.iloc[len(df_temp_pages)-1]
dwellTime = (startTime_second_task + datetime.timedelta(minutes=20) - df_temp['time']).total_seconds()
if (dwellTime > 0):
df_temp['dwellTime'] = dwellTime
df_pages_with_dwell_time = df_pages_with_dwell_time.append(df_temp)
# GETTING RID OF UNNECESSARY PAGES
df_pages_with_dwell_time = df_pages_with_dwell_time[~df_pages_with_dwell_time['source'].str.contains("peopleanalytics")]
df_pages_with_dwell_time = df_pages_with_dwell_time[~df_pages_with_dwell_time['url'].str.contains("about:")]
#print(df_pages_with_dwell_time)
#print("line:", len(df_pages_with_dwell_time))
# BUILD EXPLORATORY SEARCH VISIT PAGES
df_pages_second_task = pd.DataFrame(columns=('userID','url','time','stageID','questionID','source','host','query','dwellTime'))
for i in range(0,num_participants):
#for i in range(0, 3):
#current_userID = participants_list.iloc[i]['userID']
current_userID = participants_list[i]
df_temp_pages = df_pages_with_dwell_time.loc[df_pages_with_dwell_time['userID'] == current_userID]
df_temp_pages_first_task = df_temp_pages.loc[df_temp_pages['stageID']==31] # Visited pages during the first task
list_pages_first_task = df_temp_pages_first_task['url'].tolist()
df_temp_pages_second_task = df_temp_pages.loc[df_temp_pages['stageID']==41] # Visited pages during the second task
for j in range(0,len(df_temp_pages_second_task.index)):
if (df_temp_pages_second_task.iloc[j]['url'] not in list_pages_first_task):
df_pages_second_task = df_pages_second_task.append(df_temp_pages_second_task.iloc[j])
'''
print("duplicated")
else:
print("new in exploratory search")
df_pages_second_task = df_pages_second_task.append(df_temp_pages_second_task.iloc[j])
'''
#print(df_pages_second_task)
# BUILD SERP/QUERY TABLE and MERGE VISITING PAGES FOR EXPLORATORY TASK
for i in range(0,num_participants):
#for i in range(0,3):
#current_userID = participants_list.iloc[i]['userID']
current_userID = participants_list[i]
df_temp_pages = df_pages_second_task.loc[df_pages_lab['userID'] == current_userID]
'''
        df_temp_query_second_task = df_temp_pages.loc[~(df_temp_pages['query'] == "")]  # SERPs (non-empty query) during the second task
        df_query_second_task = df_query_second_task.append(df_temp_query_second_task)
'''
df_temp_visits_second_task = df_temp_pages.loc[df_temp_pages['query'] == ""] # Webpages visits
df_temp_visits_second_task['totaldwellTime'] = df_temp_visits_second_task.groupby('url')['dwellTime'].transform('sum')
df_temp_visits_second_task.drop(['time','dwellTime','questionID','query'], axis=1, inplace=True)
df_temp_visits_second_task = df_temp_visits_second_task.drop_duplicates('url')
df_temp_visits_second_task.rename(columns={'totaldwellTime': 'dwellTime'}, inplace=True)
df_temp_visits_second_task['relevance'] = 0
df_temp_visits_second_task.loc[df_temp_visits_second_task['dwellTime'] >= dwellTime_cut,'relevance'] = 1
df_visits_second_task = df_visits_second_task.append(df_temp_visits_second_task)
#print("###### web pages in second task #####")
#print(df_visits_second_task)
# CREATE UNIVERSE OF COVERAGE
list_universe_visits_second_task = df_visits_second_task['url'].drop_duplicates().values.tolist()
print("universe of coverage size:", len(list_universe_visits_second_task))
list_relevent_universe_visits_second_task = df_visits_second_task.loc[df_visits_second_task['relevance']==1]['url'].drop_duplicates().values.tolist()
print("universe of relevant coverage size:", len(list_relevent_universe_visits_second_task))
# QUERY
# DATA OF INDIVIDUALS
df_individual_data = pd.DataFrame(index=range(0, num_participants),columns=('userID', 'Coverage', 'UniqueCoverage', 'UsefulCoverage', 'UniqueUsefulCoverage'))
print(df_individual_data)
for i in range(0,num_participants):
#df_temp_individuals_data = pd.DataFrame(columns=('userID', 'Coverage', 'UniqueCoverage', 'RelevantCoverage', 'UniqueRelevantCoverage'))
current_userID = participants_list[i]
'''
df_individual_data.set_value(i,'userID', current_userID)
df_individual_data.set_value(i,'Coverage',get_Coverage_Individual(current_userID))
df_individual_data.set_value(i,'Coverage',get_Coverage_Individual(current_userID))
df_individual_data.set_value(i,'UniqueCoverage',get_Unique_Coverage_Individual(current_userID))
df_individual_data.set_value(i,'UsefulCoverage',get_Useful_Coverage_Individual(current_userID))
df_individual_data.set_value(i,'UniqueUsefulCoverage', get_Unique_Useful_Coverage_Individual(current_userID))
'''
df_individual_data.iloc[i, df_individual_data.columns.get_loc('userID')] = current_userID
df_individual_data.iloc[i, df_individual_data.columns.get_loc('Coverage')] = get_Coverage_Individual(current_userID)
df_individual_data.iloc[i, df_individual_data.columns.get_loc('UniqueCoverage')] = get_Unique_Coverage_Individual(current_userID)
df_individual_data.iloc[i, df_individual_data.columns.get_loc('UsefulCoverage')] = get_Useful_Coverage_Individual(current_userID)
df_individual_data.iloc[i, df_individual_data.columns.get_loc('UniqueUsefulCoverage')] = get_Unique_Useful_Coverage_Individual(current_userID)
print("##### Individual Data ######")
print(df_individual_data)
# DATA OF DYADS
df_dyad_data = pd.DataFrame(index=range(0, num_combinations),columns=('user_a', 'user_b','Coverage', 'UniqueCoverage', 'UsefulCoverage', 'UniqueUsefulCoverage'))
for i in range(0, num_combinations):
pair = participants_combinations[i]
user_a = pair[0]
user_b = pair[1]
'''
df_dyad_data.set_value(i,'user_a',user_a)
df_dyad_data.set_value(i,'user_b',user_b)
df_dyad_data.set_value(i, 'Coverage', get_Coverage_Dyad(user_a, user_b))
df_dyad_data.set_value(i, 'UniqueCoverage', get_Unique_Coverage_Dyad(user_a, user_b))
df_dyad_data.set_value(i, 'UsefulCoverage', get_Useful_Coverage_Dyad(user_a, user_b))
df_dyad_data.set_value(i, 'UniqueUsefulCoverage', get_Unique_Useful_Coverage_Dyad(user_a, user_b))
'''
df_dyad_data.iloc[i, df_dyad_data.columns.get_loc('user_a')] = user_a
df_dyad_data.iloc[i, df_dyad_data.columns.get_loc('user_b')] = user_b
df_dyad_data.iloc[i, df_dyad_data.columns.get_loc('Coverage')] = get_Coverage_Dyad(user_a, user_b)
df_dyad_data.iloc[i, df_dyad_data.columns.get_loc('UniqueCoverage')] = get_Unique_Coverage_Dyad(user_a, user_b)
df_dyad_data.iloc[i, df_dyad_data.columns.get_loc('UsefulCoverage')] = get_Useful_Coverage_Dyad(user_a, user_b)
df_dyad_data.iloc[i, df_dyad_data.columns.get_loc('UniqueUsefulCoverage')] = get_Unique_Useful_Coverage_Dyad(user_a, user_b)
print("##### Dyad Data #####")
print(df_dyad_data)
# SAVE DATAFRAMES INTO SERVER
# INDIVIDUAL DATA
sql = "DROP TABLE IF EXISTS individual_data;"
cursor.execute(sql)
sql = "CREATE TABLE individual_data (userID int(11), Coverage int(11), UniqueCoverage int(11), UsefulCoverage int(11), UniqueUsefulCoverage int(11));"
cursor.execute(sql)
for i in range(0,num_participants):
sql = "INSERT INTO individual_data (userID,Coverage,UniqueCoverage,UsefulCoverage,UniqueUsefulCoverage) VALUES (" + \
str(df_individual_data.iloc[i]['userID']) + "," + str(df_individual_data.iloc[i]['Coverage']) + "," + str(df_individual_data.iloc[i]['UniqueCoverage']) + "," + \
str(df_individual_data.iloc[i]['UsefulCoverage']) + "," + str(df_individual_data.iloc[i]['UniqueUsefulCoverage']) + ");"
cursor.execute(sql)
# DYAD DATA
sql = "DROP TABLE IF EXISTS dyad_data;"
cursor.execute(sql)
sql = "CREATE TABLE dyad_data (user_a int(11), user_b int(11), Coverage int(11), UniqueCoverage int(11), UsefulCoverage int(11), UniqueUsefulCoverage int(11));"
cursor.execute(sql)
for i in range(0, num_combinations):
sql = "INSERT INTO dyad_data (user_a,user_b,Coverage,UniqueCoverage,UsefulCoverage,UniqueUsefulCoverage) VALUES (" + \
str(df_dyad_data.iloc[i]['user_a']) + "," + str(df_dyad_data.iloc[i]['user_b']) + "," +str(df_dyad_data.iloc[i]['Coverage']) + "," + str(
df_dyad_data.iloc[i]['UniqueCoverage']) + "," + \
str(df_dyad_data.iloc[i]['UsefulCoverage']) + "," + str(
df_dyad_data.iloc[i]['UniqueUsefulCoverage']) + ");"
cursor.execute(sql)
server.stop()
print("end")
| gpl-3.0 |
neherlab/ffpopsim | examples/drift_vs_draft.py | 2 | 3032 | # vim: fdm=indent
'''
author: Richard Neher, Fabio Zanini
date: 23/08/12
content: Example of haploid_highd showing how neutral alleles are affected
by linked selective sweeps
'''
# Import modules (setting the path should not be necessary when the module is
# installed in the PYTHONPATH)
import sys
sys.path.insert(0, '../pkg/python')
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import FFPopSim as h
# specify parameters
L = 256 # simulate 256 loci
# set up population
pop = h.haploid_highd(L) # produce an instance of haploid_highd with L loci
pop.carrying_capacity = 50000 # set the average population size to 50000
pop.outcrossing_rate = 1 # make the species obligate outcrossing
pop.crossover_rate = 0.02 / pop.L # set the crossover rate of the segment to 2 centimorgans
pop.mutation_rate = 0.1 / pop.carrying_capacity # per locus mutation rate equal to 0.1/N
# set fitness landscape
selection_coefficients = 0.0*np.ones(pop.L) # most loci are neutral
m = 10
selection_coefficients[::m] = -0.1 # every m-th locus is strongly deleterious
pop.set_trait_additive(selection_coefficients) # trait 0 is by default fitness
# initialize the population in linkage equilibrium with the specified allele frequencies
initial_allele_frequencies = 0.5*np.ones(pop.L) # define some initial allele frequencies as 1/2
initial_allele_frequencies[::m] = 0.0 # set a subset of alleles to frequency 0
pop.set_allele_frequencies(initial_allele_frequencies, pop.carrying_capacity)
# evolve for 2000 generations and track the allele frequencies
maxgen = 2000
allele_frequencies = [pop.get_allele_frequencies()]
tp = [pop.generation]
while pop.generation < maxgen:
pop.evolve(10)
# save allele frequencies and time
allele_frequencies.append(pop.get_allele_frequencies())
tp.append(pop.generation)
# every 200 generations, make one of the deleterious mutations beneficial
if (pop.generation % 200 == 0):
print "generation:", pop.generation, 'out of', maxgen
# update fitness function
selection_coefficients[m*np.random.randint(0,25)] = 0.01
pop.set_trait_additive(selection_coefficients)
# convert to an array to enable slicing
allele_frequencies = np.array(allele_frequencies)
# plot the allele frequency trajectories
plt.figure()
# plot the selected mutations
for locus in xrange(0,pop.L,m):
plt.plot(tp, allele_frequencies[:,locus], c=cm.cool(locus),lw=2, ls='--')
# plot some neutral sites
for locus in xrange(5,pop.L,50):
plt.plot(tp, allele_frequencies[:,locus], c=cm.cool(locus), lw=2)
plt.title('Drift and draft')
plt.xlabel('Time [generations]')
plt.ylabel('Allele frequencies')
plt.text(100,0.85, "neutral alleles: solid")
plt.text(100,0.9, "sweeping alleles: dashed")
plt.text(100,0.765, "color indicates position \non the genome")
plt.ion()
plt.show()
| gpl-3.0 |
kipohl/ncanda-data-integration | scripts/reporting/np_data_subjects.py | 3 | 2403 | #!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
"""
np_data_subjects.py
======================
Generate a list of eids for a special subset of subjects. This list can be used
in script/xnat/check_object_names
"""
import os
import sys
import csv
import redcap
import pandas as pd
dir_csv = '/fs/u00/alfonso/Desktop/subset.csv'
template_csv = '/fs/u00/alfonso/Desktop/row.csv'
subject_list = ['B-00017-M-0','B-80403-F-3','E-01008-M-3','E-00966-M-9']
forms=['mr_session_report', 'visit_date','demographics',
'waisiv_arithmetic', 'taylor_complex_figure_scores',
'waisiv_digit_span', 'stroop', 'np_waisiv_coding',
'np_wrat4_word_reading_and_math_computation',
'waisiv_letter_number_sequencing', 'mri_stroop',
'np_reyosterrieth_complex_figure', 'np_landolt_c',
'dkefs_colorword_interference', 'np_grooved_pegboard',
'wasiblock_design', 'cnp_summary',
'paced_auditory_serial_addition_test_pasat',
'np_reyosterrieth_complex_figure_files','np_ishihara',
'np_edinburgh_handedness_inventory', 'biological_np',
'delayed_discounting_100', 'np_modified_greglygraybiel_test_of_ataxia',
'delayed_discounting_1000','youth_report_1','clinical']
visits = ['baseline_visit_arm_1','1y_visit_arm_1']
def get_project_entry(args=None):
"""
Pulls the data from REDCap
"""
# Get API key.
summary_key_file = open(os.path.join(os.path.expanduser("~"),
'.server_config',
'redcap-dataentry-token'), 'r')
summary_api_key = summary_key_file.read().strip()
# Connect to API.
project_entry = redcap.Project('https://ncanda.sri.com/redcap/api/',
summary_api_key, verify_ssl=False)
return project_entry
def data_entry_fields(forms,project,arm):
"""
Gets the dataframe containing a specific arm from REDCap
"""
# Get a dataframe of fields
data_entry_raw = project.export_records(forms = forms, format='df',
events=arm)
return data_entry_raw
def main(args):
project_entry = get_project_entry()
project_df = data_entry_fields(forms,project_entry,visits)
# Filter
filter_df = project_df.ix[subject_list]
filter_df.to_csv(dir_csv)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
argv = parser.parse_args()
sys.exit(main(args=argv))
| bsd-3-clause |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/scipy/signal/wavelets.py | 67 | 10523 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.dual import eig
from scipy.special import comb
from scipy import linspace, pi, exp
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
daub : ndarray
        The 2*p coefficients of the FIR low-pass filter.
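    Examples
    --------
    A minimal illustrative check (the filter has ``2*p`` taps):
    >>> daub(2).shape
    (4,)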
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
elif p < 35:
# construct polynomial and factor it
if p < 35:
P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
yj = np.roots(P)
else: # try different polynomial --- needs work
P = [comb(p - 1 + k, k, exact=1) / 4.0**k
for k in range(p)][::-1]
yj = np.roots(P) / 4
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
Return high-pass qmf filter from low-pass
Parameters
----------
hk : array_like
        Coefficients of the low-pass filter.
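    Returns
    -------
    ndarray
        High-pass filter coefficients.
    Examples
    --------
    Illustrative sketch (signs alternate on the reversed taps):
    >>> qmf([1, 1])
    array([ 1, -1])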
"""
N = len(hk) - 1
asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
psi : ndarray, optional
The wavelet function ``psi(x)`` at `x`:
``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
`psi` is only returned if `gk` is not None.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
and slices for quick reuse. Then inserts vectors into final vector at the
end.
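    Examples
    --------
    A minimal sketch using this module's `daub`: the Daubechies-2 scaling
    function and wavelet sampled on the dyadic grid ``K/2**5``:
    >>> x, phi, psi = cascade(daub(2), J=5)
    >>> x.shape == phi.shape == psi.shape
    True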
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.zeros((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {'0': v / sm}
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float, optional
Omega0. Default is 5
s : float, optional
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool, optional
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of `w`.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
This version has a correction
term to improve admissibility. For `w` greater than 5, the
correction term is negligible.
Note that the energy of the return wavelet is not normalised
according to `s`.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where `r` is the sampling rate.
Note: This function was created before `cwt` and is not compatible
with it.
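    Examples
    --------
    A hedged sketch of the frequency relation above (illustrative values),
    at sampling rate ``r = 1000`` Hz:
    >>> M, w, s = 1000, 5.0, 1.0
    >>> wav = morlet(M, w=w, s=s)
    >>> 2 * s * w * 1000 / M   # fundamental frequency in Hz
    10.0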
"""
x = linspace(-s * 2 * pi, s * 2 * pi, M)
output = exp(1j * w * x)
if complete:
output -= exp(-0.5 * (w**2))
output *= exp(-0.5 * (x**2)) * pi**(-0.25)
return output
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A (1 - x^2/a^2) exp(-x^2/2 a^2)``,
where ``A = 2/sqrt(3a)pi^1/4``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
mod = (1 - xsq / wsq)
gauss = np.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
def cwt(data, wavelet, widths):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(length,width)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
::
length = min(10 * width[ii], len(data))
cwt[ii,:] = signal.convolve(data, wavelet(length,
width[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 200, endpoint=False)
>>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
>>> widths = np.arange(1, 31)
>>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
>>> plt.imshow(cwtmatr, extent=[-1, 1, 31, 1], cmap='PRGn', aspect='auto',
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
>>> plt.show()
"""
output = np.zeros([len(widths), len(data)])
for ind, width in enumerate(widths):
wavelet_data = wavelet(min(10 * width, len(data)), width)
output[ind, :] = convolve(data, wavelet_data,
mode='same')
return output
| mit |
nesterione/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
rs2/pandas | pandas/core/internals/ops.py | 1 | 4453 | from collections import namedtuple
from typing import TYPE_CHECKING, Iterator, List, Tuple
import numpy as np
from pandas._typing import ArrayLike
if TYPE_CHECKING:
from pandas.core.internals.blocks import Block # noqa:F401
from pandas.core.internals.managers import BlockManager # noqa:F401
BlockPairInfo = namedtuple(
"BlockPairInfo", ["lvals", "rvals", "locs", "left_ea", "right_ea", "rblk"]
)
def _iter_block_pairs(
left: "BlockManager", right: "BlockManager"
) -> Iterator[BlockPairInfo]:
    # At this point we have already checked the parent DataFrames for
    # alignment, i.e. that rframe._indexed_same(lframe) holds.
for n, blk in enumerate(left.blocks):
locs = blk.mgr_locs
blk_vals = blk.values
left_ea = not isinstance(blk_vals, np.ndarray)
rblks = right._slice_take_blocks_ax0(locs.indexer, only_slice=True)
# Assertions are disabled for performance, but should hold:
# if left_ea:
# assert len(locs) == 1, locs
# assert len(rblks) == 1, rblks
# assert rblks[0].shape[0] == 1, rblks[0].shape
for k, rblk in enumerate(rblks):
right_ea = not isinstance(rblk.values, np.ndarray)
lvals, rvals = _get_same_shape_values(blk, rblk, left_ea, right_ea)
info = BlockPairInfo(lvals, rvals, locs, left_ea, right_ea, rblk)
yield info
def operate_blockwise(
left: "BlockManager", right: "BlockManager", array_op
) -> "BlockManager":
    # At this point we have already checked the parent DataFrames for
    # alignment, i.e. that rframe._indexed_same(lframe) holds.
res_blks: List["Block"] = []
for lvals, rvals, locs, left_ea, right_ea, rblk in _iter_block_pairs(left, right):
res_values = array_op(lvals, rvals)
if left_ea and not right_ea and hasattr(res_values, "reshape"):
res_values = res_values.reshape(1, -1)
nbs = rblk._split_op_result(res_values)
# Assertions are disabled for performance, but should hold:
# if right_ea or left_ea:
# assert len(nbs) == 1
# else:
# assert res_values.shape == lvals.shape, (res_values.shape, lvals.shape)
_reset_block_mgr_locs(nbs, locs)
res_blks.extend(nbs)
# Assertions are disabled for performance, but should hold:
# slocs = {y for nb in res_blks for y in nb.mgr_locs.as_array}
# nlocs = sum(len(nb.mgr_locs.as_array) for nb in res_blks)
# assert nlocs == len(left.items), (nlocs, len(left.items))
# assert len(slocs) == nlocs, (len(slocs), nlocs)
# assert slocs == set(range(nlocs)), slocs
new_mgr = type(right)(res_blks, axes=right.axes, do_integrity_check=False)
return new_mgr
def _reset_block_mgr_locs(nbs: List["Block"], locs):
"""
Reset mgr_locs to correspond to our original DataFrame.
"""
for nb in nbs:
nblocs = locs.as_array[nb.mgr_locs.indexer]
nb.mgr_locs = nblocs
# Assertions are disabled for performance, but should hold:
# assert len(nblocs) == nb.shape[0], (len(nblocs), nb.shape)
# assert all(x in locs.as_array for x in nb.mgr_locs.as_array)
def _get_same_shape_values(
lblk: "Block", rblk: "Block", left_ea: bool, right_ea: bool
) -> Tuple[ArrayLike, ArrayLike]:
"""
Slice lblk.values to align with rblk. Squeeze if we have EAs.
"""
lvals = lblk.values
rvals = rblk.values
# Require that the indexing into lvals be slice-like
assert rblk.mgr_locs.is_slice_like, rblk.mgr_locs
# TODO(EA2D): with 2D EAs only this first clause would be needed
if not (left_ea or right_ea):
lvals = lvals[rblk.mgr_locs.indexer, :]
assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape)
elif left_ea and right_ea:
assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape)
elif right_ea:
# lvals are 2D, rvals are 1D
lvals = lvals[rblk.mgr_locs.indexer, :]
assert lvals.shape[0] == 1, lvals.shape
lvals = lvals[0, :]
else:
# lvals are 1D, rvals are 2D
assert rvals.shape[0] == 1, rvals.shape
rvals = rvals[0, :]
return lvals, rvals
def blockwise_all(left: "BlockManager", right: "BlockManager", op) -> bool:
"""
Blockwise `all` reduction.
"""
for info in _iter_block_pairs(left, right):
res = op(info.lvals, info.rvals)
if not res:
return False
return True
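# Illustrative use of the machinery above (a hedged, minimal sketch of an
# internal API; the `df._mgr` access and `operator.add` as the array_op are
# stand-ins chosen for this example, not prescribed by this module):
#     import operator
#     import pandas as pd
#     df1 = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
#     df2 = pd.DataFrame({"a": [10, 20], "b": [0.5, 0.5]})
#     new_mgr = operate_blockwise(df1._mgr, df2._mgr, operator.add)
#     all_eq = blockwise_all(df1._mgr, df1._mgr, np.array_equal)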
| bsd-3-clause |
goodwordalchemy/thinkstats_notes_and_exercises | code/chap12soln.py | 68 | 4459 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import thinkplot
import thinkstats2
import regression
import timeseries
def RunQuadraticModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
daily['years2'] = daily.years**2
model = smf.ols('ppg ~ years + years2', data=daily)
results = model.fit()
return model, results
def PlotQuadraticModel(daily, name):
"""
"""
model, results = RunQuadraticModel(daily)
regression.SummarizeResults(results)
timeseries.PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries11',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)')
timeseries.PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries12',
title='residuals',
xlabel='years',
ylabel='price per gram ($)')
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
timeseries.PlotPredictions(daily, years, func=RunQuadraticModel)
thinkplot.Save(root='timeseries13',
title='predictions',
xlabel='years',
xlim=[years[0]-0.1, years[-1]+0.1],
ylabel='price per gram ($)')
def PlotEwmaPredictions(daily, name):
"""
"""
# use EWMA to estimate slopes
filled = timeseries.FillMissing(daily)
filled['slope'] = pandas.ewma(filled.ppg.diff(), span=180)
filled[-1:]
    # extract the last intercept and slope
start = filled.index[-1]
inter = filled.ewma[-1]
slope = filled.slope[-1]
# reindex the DataFrame, adding a year to the end
dates = pandas.date_range(filled.index.min(),
filled.index.max() + np.timedelta64(365, 'D'))
predicted = filled.reindex(dates)
# generate predicted values and add them to the end
predicted['date'] = predicted.index
one_day = np.timedelta64(1, 'D')
predicted['days'] = (predicted.date - start) / one_day
predict = inter + slope * predicted.days
predicted.ewma.fillna(predict, inplace=True)
# plot the actual values and predictions
thinkplot.Scatter(daily.ppg, alpha=0.1, label=name)
thinkplot.Plot(predicted.ewma)
thinkplot.Save()
class SerialCorrelationTest(thinkstats2.HypothesisTest):
"""Tests serial correlations by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: tuple of xs and ys
"""
series, lag = data
test_stat = abs(thinkstats2.SerialCorr(series, lag))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
series, lag = self.data
permutation = series.reindex(np.random.permutation(series.index))
return permutation, lag
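# Hedged example: the permutation test above works on any pandas Series.
def DemoSerialCorrTest(series, lag=1):
    """Prints the observed serial correlation and its permutation p-value."""
    test = SerialCorrelationTest((series, lag))
    print(test.actual, test.PValue())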
def TestSerialCorr(daily):
"""Tests serial correlations in daily prices and their residuals.
daily: DataFrame of daily prices
"""
# test the correlation between consecutive prices
series = daily.ppg
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the linear model
_, results = timeseries.RunLinearModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the quadratic model
_, results = RunQuadraticModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
def main(name):
transactions = timeseries.ReadData()
dailies = timeseries.GroupByQualityAndDay(transactions)
name = 'high'
daily = dailies[name]
PlotQuadraticModel(daily, name)
TestSerialCorr(daily)
PlotEwmaPredictions(daily, name)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
Rignak/Scripts-Python | DeepLearning/AutoEncoder/Saliency.py | 1 | 2956 | import os
from os.path import join
import numpy as np
import settings
from AutoEncoder import PlotLearning, import_model, batch_process, get_example
import cv2
import matplotlib.pyplot as plt
import scipy.misc
from flat_encoder import import_model  # deliberately shadows the AutoEncoder import above
from keras.callbacks import ModelCheckpoint
nfiles = 0
def get_saliency_generator(root, paths, output_shape=settings.INPUT_SHAPE, batch_size=settings.BATCH_SIZE, size=settings.INPUT_SHAPE):
    """Yield (image, label-map) batches; supports 2 or 3 classes."""
    def read(path):
        im = cv2.imread(path)
        if im.shape != size:
            im = scipy.misc.imresize(im, size[:2])
        return im
global nfiles
output_shape = list(output_shape)
if len(paths) == 2:
colors = [np.array([255]),
np.array([0])]
output_shape[-1] = 1
elif len(paths) == 3:
colors = [np.array([0, 0, 255]),
np.array([0, 255, 0]),
np.array([255, 0, 0])]
output_shape[-1] = 3
else:
raise Exception
mapping = {}
for path, color in zip(paths, colors):
files = os.listdir(join(root, path))
for file in files:
mapping[join(root, path, file)] = color
keys = list(mapping.keys())
nfiles = len(keys)
while True:
choices = [keys[i] for i in np.random.randint(0, len(keys), batch_size)]
batch_input = np.zeros((batch_size, output_shape[0], output_shape[1], 3))
batch_output = np.zeros((batch_size, output_shape[0], output_shape[1], output_shape[2]))
for i, choice in enumerate(choices):
batch_input[i] = read(choice)
batch_output[i, :, :] = mapping[choice]
yield batch_input, batch_output
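# Hedged sketch (hypothetical directory layout): pull one batch to verify the
# shapes the generator produces.
def demo_generator(root='dataset/train', paths=('positive', 'negative')):
    gen = get_saliency_generator(root, list(paths))
    batch_input, batch_output = next(gen)
    print(batch_input.shape, batch_output.shape)  # (B, H, W, 3) and (B, H, W, 1)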
def train_saliency(root, paths):
global nfiles
val_generator = batch_process(get_saliency_generator(join(root, 'val'), paths))
training_generator = batch_process(get_saliency_generator(root, paths))
example = next(val_generator)
next(training_generator)
if example[1].shape[-1] == 1:
last_activation = "sigmoid"
else:
last_activation = 'softmax'
model = import_model(False, settings.INPUT_SHAPE, last_activation=last_activation, canals=example[1].shape[-1])
if example is not None:
plt.imshow(example[0][0, :, :, ::-1])
plt.show(block=True)
model.example = example
calls = [PlotLearning(),
ModelCheckpoint(join('models', 'saliency.h5'),
save_best_only=True)]
model.fit_generator(generator=training_generator,
validation_data=val_generator,
verbose=1,
steps_per_epoch=nfiles//settings.BATCH_SIZE,
validation_steps=nfiles//settings.BATCH_SIZE//10,
epochs=settings.EPOCHS,
callbacks=calls)
if __name__ == '__main__':
train_saliency(settings.DATASET, settings.TAGS)
| gpl-3.0 |
alextag/Twitter-Sentiment-Analysis | FNN/train_sklearn_FNN.py | 2 | 12149 | from __future__ import print_function
from time import time
import numpy as np
import sys, os, operator, pickle
import matplotlib.pyplot as plt
from sklearn.decomposition import RandomizedPCA
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from pybrain.structure import TanhLayer
from pybrain.datasets import ClassificationDataSet
from pybrain.utilities import percentError
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer
### Global variables
display_graphs = False # Boolean flag for displaying graphs
vocabulary = {} # A dictionary of all the unique words in the corpus
### Change me to higher values for better accuracy!
NUM_FEATURES = 2000 # The number of most common words in the corpus to use as features
PERCENTAGE_DATA_SET_TO_USE = 0.05 # The percentage of the dataset to use
N_COMPONENTS = 200 # The number of components for the PCA
N_HIDDEN = 32
N_EPOCHS = 7
trainer = None
classifier = None
###############################################################################
def load_parsed_data():
"""
Loads the train, test, and validation sets
Returns:
inputs_train the input train set
targets_train the target train set
inputs_valid the input validation set
targets_valid the target validation set
inputs_test the input test set
targets_test the target test set
"""
print('loading parsed dataset')
inputs_train = np.load('../parsed_data/inputs_train.npy')
targets_train = np.load('../parsed_data/targets_train.npy')
inputs_valid = np.load('../parsed_data/inputs_valid.npy')
targets_valid = np.load('../parsed_data/targets_valid.npy')
inputs_test = np.load('../parsed_data/inputs_test.npy')
targets_test = np.load('../parsed_data/targets_test.npy')
print('loaded parsed dataset')
return inputs_train, targets_train, inputs_valid, targets_valid, inputs_test, targets_test
def trained_model_exists():
"""
Checks to see if the extracted features for the Naive Bayes
models are saved.
Returns:
boolean True iff file 'data/model.pkl' exists
"""
return os.path.exists('data/model.pkl')
def load_trained_model():
"""Loads and returns the trained model"""
print('loading trained model')
with open('data/model.pkl', 'rb') as input:
classifier = pickle.load(input)
print('loaded trained model')
input.close()
return classifier
def load_pca():
with open('data/pca.pkl', 'rb') as input:
pca = pickle.load(input)
print('loaded pca')
input.close()
return pca
def save_model(classifier):
"""Saves the model"""
print('saving trained model')
with open('data/model.pkl', 'wb') as output:
pickle.dump(classifier, output, pickle.HIGHEST_PROTOCOL)
print('saved trained model')
def save_pca(pca):
with open('data/pca.pkl', 'wb') as output:
pickle.dump(pca, output, pickle.HIGHEST_PROTOCOL)
print('saved pca')
def load_features():
"""
Loads the extracted features for each data set
Returns:
train_features a dictionary of the features in the train set
valid_features a dictionary of the features in the validation set
test_features a dictionary of the features in the test set
"""
print('loading extracted features')
train_features = np.load('data/train_features.npy')
valid_features = np.load('data/valid_features.npy')
test_features = np.load('data/test_features.npy')
print('loaded extracted features')
return train_features, valid_features, test_features
def save_features(train_features, valid_features, test_features):
"""Saves the extracted features for each dataset"""
print('saving extracted features')
np.save('data/train_features.npy', train_features)
np.save('data/valid_features.npy', valid_features)
np.save('data/test_features.npy', test_features)
print('saved extracted features')
def build_vocabulary(inputs):
"""
Builds a dictionary of unique words in the corpus
Returns:
vocabulary a dictionary of all the unique words in the corpus
"""
print('building vocabulary of words in the corpus')
global vocabulary
for tweet in inputs:
for word in str(tweet).split():
            if word in vocabulary:
vocabulary[word] += 1
else:
vocabulary[word] = 1
print('built vocabulary of words in the corpus')
return vocabulary
def build_features(document, i, vocabulary_words):
if i % 10000 == 0:
print('extracted features for {0} tweets'.format(i))
document_words = set(str(document).split())
features = np.zeros(len(vocabulary_words))
    for j in range(len(vocabulary_words)):
        features[j] = (vocabulary_words[j] in document_words)
return features
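# Hedged illustration of the presence-vector encoding above: each feature is
# 1.0 iff the corresponding vocabulary word occurs in the tweet.
def demo_build_features():
    vocab = ['lol', 'jbiebs', 'monday']
    print(build_features('lol what a monday', 1, vocab))  # -> presence flags [1., 0., 1.]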
def extract_features(inputs_train, targets_train, inputs_valid, targets_valid, inputs_test, targets_test):
"""
Extracts features for training the model.
Returns:
train_features a dictionary of word presence in the entire input
dataset for each tweet
{'contains(lol)': False, 'contains(jbiebs)': True, ...}
valid_features a dictionary of word presence in the entire input
dataset for each tweet
{'contains(lol)': False, 'contains(jbiebs)': True, ...}
test_features a dictionary of word presence in the entire input
dataset for each tweet
{'contains(lol)': False, 'contains(jbiebs)': True, ...}
"""
inputs = np.hstack((inputs_train, inputs_valid, inputs_test))
vocabulary = build_vocabulary(inputs)
# Get most common words from vocabulary
global NUM_FEATURES
    words = dict(sorted(vocabulary.items(), key=operator.itemgetter(1), reverse=True)[:NUM_FEATURES])
words = words.keys()
print('extracting features for all tweets')
train_features = [(build_features(inputs_train[i], i, words)) for i in range(len(inputs_train))]
valid_features = [(build_features(inputs_valid[i], i, words)) for i in range(len(inputs_valid))]
test_features = [(build_features(inputs_test[i], i, words)) for i in range(len(inputs_test))]
print('extracted features for all tweets')
return np.array(train_features), np.array(valid_features), np.array(test_features)
def plot_precision_and_recall(predictions, targets):
"""Calculates and displays the precision and recall graph"""
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
average_precision = average_precision_score(targets, predictions)
precision, recall, _ = precision_recall_curve(targets, predictions)
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall, precision, label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision))
plt.legend(loc="lower left")
plt.show()
def make_prediction(tstdata):
global trainer
if trainer is not None:
        error = percentError(trainer.testOnClassData(dataset=tstdata),
                             tstdata['class'])
print ('Percent Error on dataset: ', error)
def train_model( trndata, valid_data ):
print("Fitting the classifier to the training set")
t0 = time()
for i in range(1, N_EPOCHS+1):
trainer.trainEpochs(1)
make_prediction(valid_data)
print("done in %0.3fs" % (time() - t0))
return
def main():
"""
CLI Arguments allowed:
--display_graphs Displays graphs
--retrain Trains a new model
--cross-validate Runs cross validation to fine tune the model
--test=validation_set Tests the latest trained model against the validation set
--test=test_set Tests the latets trained model against the test set
"""
global trainer, classifier
inputs_train, targets_train, inputs_valid, targets_valid, inputs_test, targets_test = load_parsed_data()
if '--display_graphs' in sys.argv:
display_graphs = True
print('using {} percent of all data in corpus'.format(PERCENTAGE_DATA_SET_TO_USE*100))
print('using {} most common words as features'.format(NUM_FEATURES))
if not trained_model_exists() or '--retrain' in sys.argv:
train_features, valid_features, test_features = extract_features(
            inputs_train[:int(len(inputs_train) * PERCENTAGE_DATA_SET_TO_USE)],
            targets_train[:int(len(targets_train) * PERCENTAGE_DATA_SET_TO_USE)],
            inputs_valid[:int(len(inputs_valid) * PERCENTAGE_DATA_SET_TO_USE)],
            targets_valid[:int(len(targets_valid) * PERCENTAGE_DATA_SET_TO_USE)],
            inputs_test[:int(len(inputs_test) * PERCENTAGE_DATA_SET_TO_USE)],
            targets_test[:int(len(targets_test) * PERCENTAGE_DATA_SET_TO_USE)]
)
save_features(train_features, valid_features, test_features)
pca = RandomizedPCA(n_components=N_COMPONENTS, whiten=False).fit(train_features)
save_pca(pca)
print ("Saved PCA")
X_train = pca.transform(train_features)
X_valid = pca.transform(valid_features)
pca = None
print ("Created PCAd features")
valid_data = ClassificationDataSet(N_COMPONENTS, target=1, nb_classes=2)
for i in range(len(X_valid)):
            valid_data.addSample(X_valid[i], targets_valid[i])
valid_data._convertToOneOfMany()
X_valid = None
train_data = ClassificationDataSet(N_COMPONENTS, target=1, nb_classes=2)
for i in range(len(X_train)):
train_data.addSample( X_train[i], targets_train[i])
train_data._convertToOneOfMany()
X_train = None
classifier = buildNetwork( train_data.indim, N_HIDDEN, train_data.outdim, outclass=SoftmaxLayer)
trainer = BackpropTrainer( classifier, dataset=train_data, momentum=0.1, learningrate=0.01 , verbose=True)
train_model(train_data, valid_data)
save_model(classifier)
train_data = None
valid_data = None
else:
train_features, valid_features, test_features = load_features()
pca = load_pca()
X_train = pca.transform(train_features)
pca = None
print ("Created PCAd features")
train_data = ClassificationDataSet(N_COMPONENTS, target=1, nb_classes=2)
for i in range(len(X_train)):
train_data.addSample( X_train[i], targets_train[i])
train_data._convertToOneOfMany()
X_train = None
classifier = load_trained_model()
trainer = BackpropTrainer( classifier, dataset=train_data, momentum=0.1, learningrate=0.01 , verbose=True)
if '--test=validation_set' in sys.argv:
print ("Running against validation set")
pca = load_pca()
X_valid = pca.transform(valid_features)
pca = None
valid_data = ClassificationDataSet(N_COMPONENTS, target=1, nb_classes=2)
for i in range(len(X_valid)):
            valid_data.addSample(X_valid[i], targets_valid[i])
valid_data._convertToOneOfMany()
X_valid = None
make_prediction(valid_data)
if '--test=test_set' in sys.argv:
print ("Running against test set")
pca = load_pca()
X_test = pca.transform(test_features)
pca = None
test_data = ClassificationDataSet(N_COMPONENTS, target=1, nb_classes=2)
for i in range(len(X_test)):
test_data.addSample( X_test[i], targets_test[i])
test_data._convertToOneOfMany()
y_pred = trainer.testOnClassData(dataset=test_data)
        plot_precision_and_recall(y_pred, targets_test[:int(len(targets_test) * PERCENTAGE_DATA_SET_TO_USE)])
X_test = None
make_prediction(test_data)
if __name__ == "__main__": main()
| gpl-3.0 |
michaelaye/iuvs | iuvs/io.py | 1 | 35314 | import datetime as dt
import os
import socket
import sys
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from astropy.io import fits
from matplotlib.patches import Rectangle
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from pathlib import Path
from scipy.ndimage.filters import generic_filter
from .exceptions import DimensionsError, PathNotReadableError
host = socket.gethostname()
home = Path(os.environ['HOME'])
HOME = home
if host.startswith('maven-iuvs-itf'):
analysis_out = home / 'to_keep'
else:
analysis_out = home / 'data' / 'iuvs' / 'to_keep'
mycmap = 'cubehelix'
plotfolder = HOME / 'plots'
outputfolder = HOME / 'output'
sys_byteorder = ('>', '<')[sys.byteorder == 'little']
def env_path(env):
"""Return root path depending on `env`.
Parameters
----------
env : {'stage', 'production'}
Returns
-------
    path : pathlib.Path
"""
host = socket.gethostname()
if host.startswith('maven-iuvs-itf'):
path = Path('/maven_iuvs/{}/products'.format(env))
elif host.startswith('test-machine'):
path = Path('/abc')
else:
path = Path(os.environ['HOME']) / 'Dropbox' / 'data' / 'iuvs'
return path
def convert_big_endian(data):
try:
if data.dtype.byteorder not in ('=', sys_byteorder):
data = data.byteswap().newbyteorder(sys_byteorder)
except AttributeError: # when it's boolean e.g.
pass
return data
def get_data_path(level, env='stage'):
"""Return data path for given `level`.
Some shortcuts for making interactive analysis faster.
Parameters
----------
level : {'l0', 'l1a', 'l1b', 'hk'}
shorter string key to look up the longer subdir's names.
env : {'stage', 'production'}, optional
Switch to decide between production or staging environment.
Default: stage.
"""
levelstring = dict(l0='level0', l1a='level1a', l1b='level1b',
hk='housekeeping/level1a')
path = env_path(env) / levelstring[level]
return path
def get_filenames(level, pattern=None, env='stage', ext='.fits.gz',
iterator=True):
"""return iterator (default) or list of filenames for given pattern and environment.
Parameters
----------
level : {'l0', 'l1a', 'l1b', 'hk'}
dict key to look up the respective subdir name in `get_data_path`.
pattern : str, optional
globbing pattern for `Path.glob()`
env : {'stage', 'production'}, optional
Switch to decide between production or staging environment.
Default: stage.
ext : str, optional
Extension for filtering what files to find. Usually '.fits.gz'
iterator : bool
Switch between returning iterator (default) or list.
Returns
-------
list or iterator
List or Iterator of filenames found.
"""
if pattern is None:
pattern = '*'
else:
pattern = '*' + pattern + '*'
path = get_data_path(level, env)
if not os.access(str(path), os.R_OK):
raise PathNotReadableError(path)
result = map(str, path.glob(pattern + ext))
return result if iterator else list(result)
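# Hedged usage sketch (paths are machine-specific and may not be readable):
#
#   fuv_files = get_filenames('l1a', pattern='mode080-fuv', iterator=False)
#   for fname in get_filenames('l1b', env='production'):
#       print(fname)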
def l1a_filenames(pattern=None, **kwargs):
"""Search for L1A filenames with patterns.
Parameters
----------
pattern : str
will be bracketed with '*', so needs to be correct in itself.
For example "mode080-fuv" but not "mode080fuv".
kwargs : dict
To provide to `get_filenames`
Examples
--------
`pattern` = "mode080-fuv"
but not
`pattern` = "mode080fuv"
as that pattern does not exist.
"""
return get_filenames('l1a', pattern=pattern, **kwargs)
def l1b_filenames(pattern=None, **kwargs):
"""Search for L1B filenames with patterns.
Parameters
----------
pattern : str
will be bracketed with '*', so needs to be correct in itself.
For example "mode080-fuv" but not "mode080fuv".
kwargs : dict
To provide to `get_filenames`
Examples
--------
`pattern` = "mode080-fuv"
but not
`pattern` = "mode080fuv"
as that pattern does not exist.
"""
return get_filenames('l1b', pattern=pattern, **kwargs)
def l0_filenames(pattern=None, **kwargs):
"""Search for L1B filenames with patterns.
Parameters
----------
pattern : str
will be bracketed with '*', so needs to be correct in itself.
For example "mode080-fuv" but not "mode080fuv".
kwargs : dict
To provide to `get_filenames`
Examples
--------
`pattern` = "mode080-fuv"
but not
`pattern` = "mode080fuv"
as that pattern does not exist.
"""
return get_filenames('l0', pattern=pattern, **kwargs)
def l1a_darks(darktype=''):
searchpattern = darktype + 'dark*.fits.gz'
print("Searching for", searchpattern)
return l1a_filenames(searchpattern)
def image_stats(data):
return pd.Series(data.ravel()).describe()
def get_filename_df(level, env='stage', pattern=None):
"""Return pandas.DataFrame with filename data.
Parameters
----------
level : {'l0', 'l1a', 'l1b', 'hk'}
dict key to look up the respective subdir name in `get_data_path`.
env : {'stage', 'production'}, optional
Switch to decide between production or staging environment.
Default: stage.
pattern : str
will be bracketed with '*', so needs to be correct in itself.
For example "mode080-fuv" but not "mode080fuv".
Provided to `get_filenames`.
Returns
-------
pandas.DataFrame
Indexed by time, if possible, sorted.
"""
fnames = get_filenames(level, env=env, pattern=pattern)
iuvs_fnames = []
for fname in fnames:
if not level == 'hk':
iuvs_fnames.append(ScienceFilename(fname))
else:
iuvs_fnames.append(HKFilename(fname))
df = pd.DataFrame([fname.as_series() for fname in iuvs_fnames])
if level != 'hk':
df['channel'] = df.channel.astype('category')
df.set_index('time', inplace=True)
df.sort_index(inplace=True)
# next line filters for newest revisions
return df[df.p.isin(df.groupby('obs_id', sort=False)['p'].max())]
def get_current_hk_fnames(env='stage'):
"return only the latest revisions of filenames per observation_id."
df = get_filename_df('hk', env=env)
return df.p
def get_current_science_fnames(level, pattern=None, env='stage'):
"return only the latest revisions of filenames per observation_id."
df = get_filename_df(level, pattern=pattern, env=env)
return df.p
def get_header_df(hdu, drop_comment=True):
"""Take a FITS HDU, convert to DataFrame.
And on the way:
fix it,drop COMMENT and KERNEL
Parameters
----------
hdu : FITS header unit
The HDU to extract a header dataframe from
drop_comment : bool
To control if the comment and kernel lines from the header should be dropped.
Default: True. No errors are raised when those fields do not exist.
"""
hdu.verify('silentfix')
header = hdu.header
d = {}
for key in set(header.keys()):
if drop_comment and key == 'COMMENT':
continue
data = header[key]
d[key] = convert_big_endian(data)
df = pd.DataFrame(d, index=[0])
return df.drop('COMMENT KERNEL'.split(), axis=1, errors='ignore') if drop_comment else df
def save_to_hdf(df, fname, output_subdir=None):
"""Save temporary HDF file in output folder for later concatenation.
By default the product is stored in HOME/output.
Parameters
----------
df : pd.DataFrame
The dataframe to save
fname : string
The product filename that was used to create this dataframe to save.
The saving filename for the HDF file will be auto-determined from that.
output_subdir : str
String to determine a subfolder inside HOME/output where this data
should be stored instead of just HOME/output
"""
path = Path(fname)
newfname = path.with_suffix('.h5').name
folderpath = HOME / 'output'
if output_subdir:
folderpath = folderpath / output_subdir
path = folderpath / newfname
df.to_hdf(str(path), 'df', format='t')
return str(path)
class Filename(object):
def __init__(self, fname):
self.p = Path(fname)
self.root = self.p.parent
self.basename = self.p.name
self.tokens = self.basename.split('_')
self.mission, self.instrument = self.tokens[:2]
def as_series(self):
return pd.Series(self.__dict__)
class ScienceFilename(Filename):
def __init__(self, fname):
super(ScienceFilename, self).__init__(fname)
tokens = self.tokens
self.level = tokens[2]
self.phase = tokens[3]
self.timestr, self.version = tokens[4:6]
self.revision = tokens[6].split('.')[0]
phasetokens = self.phase.split('-')
if len(phasetokens) == 4:
self.phase, self.cycle_orbit, self.mode, self.channel = phasetokens
elif len(phasetokens) == 3:
self.phase, self.cycle_orbit, self.channel = phasetokens
self.mode = 'N/A'
else:
self.phase, self.channel = phasetokens
self.mode = 'N/A'
self.cycle_orbit = 'N/A'
self.time = dt.datetime.strptime(self.timestr,
'%Y%m%dT%H%M%S')
self.version_string = self.version + self.revision
self.obs_id = '_'.join(self.basename.split('_')[:5])
if self.cycle_orbit.startswith('orbit'):
self.orbit = float(self.cycle_orbit[5:])
else:
self.orbit = np.nan
def __eq__(self, other):
weak_equality = ['mission', 'instrument', 'level', 'phase', 'timestr']
strong_equality = ['version', 'revision']
weak = True
strong = True
for attr in weak_equality:
# if any attribute is different, weak get's set to False
weak = weak and (getattr(self, attr) == getattr(other, attr))
for attr in strong_equality:
strong = strong and (getattr(self, attr) == getattr(other, attr))
if weak and strong:
return True
elif weak:
return 0
else:
return False
def formatter(self, itemstr):
return "{}: {}\n".format(itemstr.capitalize(), getattr(self, itemstr))
def __repr__(self):
items = ('basename mission instrument level phase cycle_orbit mode channel'
' version revision time'.split())
s = ''
for item in items:
s += self.formatter(item)
return s
def __str__(self):
return self.__repr__()
class HKFilename(Filename):
def __init__(self, fname):
super(HKFilename, self).__init__(fname)
tokens = self.tokens
self.kind = tokens[2]
self.level = tokens[3]
self.datestring = tokens[4]
self.version = tokens[5].split('.')[0]
self.obs_id = '_'.join(self.basename.split('_')[:5])
year, month, day = tokens[4][:4], tokens[4][4:6], tokens[4][6:8]
self.time = dt.datetime(int(year), int(month), int(day))
class FitsBinTable(object):
"""Convert a binary Fits table to a pandas table.
Attributes
==========
header: links to the header of the provided HDU
data: contains the pandas DataFrame with the HDU.data
"""
def __init__(self, hdu):
self.header = hdu.header
self.data = pd.DataFrame(hdu.data).T
def iuvs_utc_to_dtime(utcstring):
"Convert the IUVS UTC string to a dtime object."
cleaned = utcstring[:-3] + '0UTC'
time = dt.datetime.strptime(cleaned, '%Y/%j %b %d %H:%M:%S.%f%Z')
return time
def set_spec_vmax_vmin(log, inspec, vmax, vmin):
if log:
spec = np.log10(inspec)
vmax = 2.5 if vmax is None else vmax
vmin = -3.0 if vmin is None else vmin
else:
spec = inspec
vmax = 10 if vmax is None else vmax
vmin = 0 if vmin is None else vmin
return spec, vmax, vmin
def do_labels(ax, title='', set_extent=None):
ax.set_title(title)
if set_extent is True:
xlabel = 'Wavelength [nm]'
elif set_extent is False:
xlabel = 'Spectral bins'
else:
xlabel = 'set_extent not specified in do_labels'
ax.set_xlabel(xlabel)
ax.set_ylabel('Spatial pixels')
def plot_colorbar(im, ax, log):
cb = plt.colorbar(im, ax=ax)
label = 'log(DN/s)' if log else 'DN/s'
cb.set_label(label, fontsize=14, rotation=0)
def plot_hist(ax, spec):
in_axes = inset_axes(ax, width="20%", height="20%",
loc=2)
in_axes.hist(spec.ravel(), bins=20, normed=True)
plt.setp(in_axes.get_xticklabels(), visible=False)
plt.setp(in_axes.get_yticklabels(), visible=False)
in_axes.grid('off')
class ScienceFitsFile(object):
def __init__(self, fname):
"""Base class for L1A/B Reader.
Input:
fname: needs to be absolute complete path. (To be tested.)
"""
if type(fname) == list:
fname = fname[0]
self.fname = fname
self.iuvsfname = Filename(fname)
self.hdulist = fits.open(self.fname)
def get_real_binnings(self, dim):
binning = getattr(self, 'Binning')
widths = binning[dim+'BINWIDTH']
transmits = binning[dim+'BINTRANSMIT'].astype(bool)
return widths[transmits]
@property
def spabins(self):
return self.get_real_binnings('SPA')[:, np.newaxis]
@property
def n_unique_spabins(self):
return np.unique(self.spabins).size
@property
def spebins(self):
return self.get_real_binnings('SPE')[np.newaxis, :]
@property
def n_unique_spebins(self):
return np.unique(self.spebins).size
@property
def scaling_factor(self):
"""Return factor to get DN/s.
Because the binning returns just summed up values, one must
also include the binning as a scaling factor, not only the
integration time.
"""
bin_scale = self.spabins * self.spebins
return bin_scale * self.int_time
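    # Worked example (hedged): with 4x4 binning and a 1.4 s integration time,
    # a binned value of 2240 DN corresponds to 2240 / (4 * 4 * 1.4) = 100 DN/s.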
def __repr__(self):
s = "Filename: {}\n".format(self.p.name)
s += "Environment: {}\n".format(self.env)
s += "n_dims: {}\n".format(self.n_dims)
s += "spatial: {}\n".format(self.spatial_size)
s += "spectral: {}".format(self.spectral_size)
return s
@property
def n_dims(self):
return self.img_header['NAXIS']
@property
def n_integrations(self):
return int(getattr(self, 'Engineering').get_value(0, 'NUMBER'))
@property
def primary_img_dn_s(self):
return (self.img / self.scaling_factor) + 0.00001
@property
def spatial_size(self):
return self.img_header['NAXIS2']
@property
def spectral_size(self):
return self.img_header['NAXIS1']
@property
def int_time(self):
return self.img_header['INT_TIME']
@property
def wavelengths(self):
return getattr(self, 'Observation')['WAVELENGTH'][0]
@property
def img_header(self):
imgdata = self.hdulist[0]
return imgdata.header
@property
def img(self):
return self.hdulist[0].data
@property
def scaled_img(self):
return self.img / self.scaling_factor
@property
def plotfname(self):
return os.path.basename(self.fname)[12:-16]
@property
def plottitle(self):
title = "{fname}, INT_TIME: {int}".format(fname=self.plotfname,
int=self.int_time)
return title
@property
def capture(self):
string = self.img_header['CAPTURE']
return iuvs_utc_to_dtime(string)
@property
def integration_times(self):
"Convert times from Integration table to pandas TimeSeries"
return getattr(self, 'Integration').loc['UTC'].map(iuvs_utc_to_dtime)
def get_integration(self, data_attr, integration):
data = getattr(self, data_attr)
if data.ndim == 3:
if integration is None:
print("More than 1 integration present.\n"
"Need to provide integration index.")
return
else:
spec = data[integration]
else:
spec = data
return spec
def get_n_data_attr(self, data_attr):
data = getattr(self, data_attr)
if data.ndim == 3:
return data.shape[0]
else:
return 1
def plot_some_spectrogram(self, inspec, title, ax=None, cmap=None,
cbar=True, log=False, showaxis=True,
min_=None, max_=None, set_extent=None,
draw_rectangle=True, vmin=None, vmax=None,
**kwargs):
        show_hist = kwargs.pop('plot_hist', False)
savename = kwargs.pop('savename', False)
spec, vmax, vmin = set_spec_vmax_vmin(log, inspec, vmax, vmin)
cmap = mycmap if cmap is None else cmap
if ax is None:
fig, ax = plt.subplots()
fig.suptitle(self.plottitle, fontsize=16)
try:
waves = self.wavelengths[0]
except IndexError:
waves = self.wavelengths
if set_extent:
im = ax.imshow(spec, cmap=cmap,
extent=(waves[0], waves[-1], len(spec), 0),
vmin=vmin,
vmax=vmax,
aspect='auto',
**kwargs)
else:
im = ax.imshow(spec, cmap=cmap, vmin=vmin, vmax=vmax,
aspect='auto', **kwargs)
do_labels(ax, title=title, set_extent=set_extent)
if not showaxis:
ax.grid('off')
if cbar:
plot_colorbar(im, ax, log)
# rectangle
if draw_rectangle:
ax.add_patch(get_rectangle(inspec))
# inset histogram
        if show_hist:
            plot_hist(ax, spec)
if savename:
ax.get_figure().savefig(savename, dpi=100)
self.current_ax = ax
self.current_spec = spec
return ax
def plot_some_profile(self, data_attr, integration,
spatial=None, ax=None, scale=False,
log=False, spa_average=False, title=None,
**kwargs):
plot_hist = kwargs.pop('plot_hist', False)
savename = kwargs.pop('savename', False)
spec = self.get_integration(data_attr, integration)
nints = self.get_n_data_attr(data_attr)
if scale:
spec = spec / self.scaling_factor
if spatial is None:
# if no spatial bin given, take the middle one
spatial = self.spatial_size // 2
if title is None:
if not spa_average:
title = ("Profile of {} at spatial: {}, integration {} of {}"
.format(data_attr, spatial, integration, nints))
else:
title = ("Profile of {}, spatial mean. Integration {} of {}"
.format(data_attr, integration, nints))
if ax is None:
fig, ax = plt.subplots()
fig.suptitle(self.plottitle, fontsize=12)
if log:
func = ax.semilogy
else:
func = ax.plot
if spa_average:
data = spec.mean(axis=0)
else:
data = spec[spatial]
func(self.wavelengths[spatial], data, **kwargs)
ax.set_xlim((self.wavelengths[spatial][0],
self.wavelengths[spatial][-1]))
ax.set_title(title, fontsize=11)
ax.set_xlabel("Wavelength [nm]")
if log:
ax.set_ylabel("log(DN/s)")
else:
ax.set_ylabel('DN/s')
if plot_hist:
in_axes = inset_axes(ax, width="20%", height="20%",
loc=2)
in_axes.hist(spec.ravel(), bins=20, normed=True, log=True)
plt.setp(in_axes.get_xticklabels(), visible=False)
plt.setp(in_axes.get_yticklabels(), visible=False)
in_axes.grid('off')
if savename:
ax.get_figure().savefig(savename, dpi=100)
return ax
def plot_img_spectrogram(self,
integration=None, ax=None,
cmap=None, cbar=True, log=True, **kwargs):
spec = self.get_integration('img', integration)
title = ("Primary spectrogram, integration {} out of {}"
.format(integration, self.n_integrations))
return self.plot_some_spectrogram(spec, title,
ax, cmap, cbar, log, **kwargs)
def plot_img_profile(self, integration=None, ax=None, log=True,
**kwargs):
return self.plot_some_profile('img', integration, ax=ax,
**kwargs)
def image_stats(self):
return image_stats(self.img)
def fits_table_to_dataframe(hdu):
d = {}
for col in hdu.columns:
data = hdu.data[col.name]
d[col.name] = convert_big_endian(data)
return pd.DataFrame(d)
class L1AReader(ScienceFitsFile):
"""For Level1a"""
works_with_dataframes = [
'Integration',
'Engineering',
]
level = 'l1a'
def __init__(self, fname, env='production'):
# fix relative paths
self.env = env
fname = Path(fname)
if not fname.is_absolute():
fname = get_data_path(self.level, env) / fname
self.p = fname
# call super init
super(L1AReader, self).__init__(str(fname))
if self.spectral_size == 1024 and self.spatial_size == 1024:
warnings.warn("\nNot loading HDU data due to performance issue.\n"
"Identified with 'Observation' HDU so far, working with other\n"
"data should be fine.")
else:
for hdu in self.hdulist[1:]:
name = hdu.header['EXTNAME']
setattr(self, name + '_header', hdu.header)
if name in self.works_with_dataframes:
setattr(self, name, fits_table_to_dataframe(hdu))
else:
setattr(self, name, hdu.data)
# check for error case with binning table not found:
if self.n_dims == 2 and self.n_integrations > 1:
raise DimensionsError('n_dims == 2 with n_integrations > 1')
class L1BReader(ScienceFitsFile):
"""For Level1B"""
level = 'l1b'
works_with_dataframes = ['DarkIntegration',
'DarkEngineering',
'background_light_source',
'Integration',
'Engineering']
def __init__(self, fname, env='stage'):
# fix relative path
if not os.path.isabs(fname):
fname = get_data_path(self.level, env) / fname
# call super init
super(L1BReader, self).__init__(fname)
for hdu in self.hdulist[1:]:
name = hdu.header['EXTNAME']
setattr(self, name + '_header', hdu.header)
if name in self.works_with_dataframes:
setattr(self, name, pd.DataFrame(hdu.data))
else:
setattr(self, hdu.header['EXTNAME'], hdu.data)
self.darks_interpolated = getattr(self, 'background_dark')
@property
def dark_det_temps(self):
return getattr(self, 'Dark_Integration')['DET_TEMP_C']
@property
def dark_case_temps(self):
return getattr(self, 'Dark_Integration')['CASE_TEMP_C']
@property
def dark_times(self):
try:
utcs = getattr(self, 'DarkIntegration')['UTC']
except AttributeError:
utcs = getattr(self, 'Dark_Integration')['UTC']
times = []
for utc in utcs:
times.append(iuvs_utc_to_dtime(utc))
        return pd.Series(times)
@property
def n_darks(self):
return getattr(self, 'detector_dark').shape[0]
@property
def raw_dn_s(self):
return (getattr(self, 'detector_raw') / self.scaling_factor) + 0.001
@property
def dark_dn_s(self):
return (getattr(self, 'detector_dark') / self.scaling_factor) + 0.001
@property
def dds_dn_s(self):
try:
dds = getattr(self, 'detector_dark_subtracted')
except AttributeError:
dds = getattr(self, 'detector_background_subtracted')
return (dds / self.scaling_factor) + 0.001
def plot_raw_spectrogram(self, integration=None, ax=None,
cmap=None, cbar=True, log=False,
set_extent=True,
**kwargs):
if integration is None:
integration = -1
spec = self.get_integration('raw_dn_s', integration)
title = ("Raw light spectrogram, integration {} out of {}"
.format(integration, self.n_integrations))
return self.plot_some_spectrogram(spec, title, ax,
cmap, cbar, log,
set_extent=set_extent,
**kwargs)
def plot_dark_spectrogram(self, integration=None, ax=None,
cmap=None, cbar=True, log=False,
**kwargs):
dark = self.get_integration('dark_dn_s', integration)
title = ("Dark spectogram, integration {} out of {}"
.format(integration, self.n_darks))
return self.plot_some_spectrogram(dark, title, ax,
cmap, cbar, log, **kwargs)
def plot_raw_overview(self, integration=None, imglog=True,
save_token=None, spatial=None, proflog=True,
img_plot_hist=False, prof_plot_hist=False,
**kwargs):
        """Plot overview of spectrogram and profile at index `integration`."""
        if integration is None:
            integration = -1
fig, axes = plt.subplots(nrows=2, sharex=False)
fig.suptitle(self.plottitle, fontsize=16)
# spectrogram
ax = self.plot_raw_spectrogram(integration, ax=axes[0],
cbar=False, log=imglog,
set_extent=False, plot_hist=img_plot_hist,
**kwargs)
# profile
self.plot_raw_profile(integration, ax=axes[1], log=proflog,
spatial=spatial, plot_hist=prof_plot_hist)
# colorbar
im = ax.get_images()[0] # pylint: disable=no-member
fig.tight_layout()
fig.subplots_adjust(top=0.9, bottom=0.1)
# cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
# cb = plt.colorbar(im, cax=cbar_ax)
cb = plt.colorbar(im, ax=axes.ravel().tolist())
# cb = plt.colorbar(im, ax=axes[0])
if imglog:
label = ' log(DN/s)'
else:
label = ' DN/s'
cb.set_label(label, fontsize=13, rotation=0)
if save_token is not None:
fname = "{}_{}.png".format(self.plotfname,
save_token)
fig.savefig(os.path.join(str(plotfolder), fname), dpi=150)
def plot_mean_values(self, item):
fig, ax = plt.subplots()
fig.suptitle(self.plottitle)
ax.plot(getattr(self, item).mean(axis=(1, 2)))
ax.set_xlabel("Integration number")
ax.set_ylabel("DN / s")
ax.set_title("Mean {} over observation (i.e. L1B file)".format(item))
savename = os.path.join(str(plotfolder),
self.plotfname + 'mean_{}.png'.format(item))
plt.savefig(savename, dpi=120)
def plot_mean_raw_values(self):
self.plot_mean_values('raw_dn_s')
def plot_mean_dds_values(self):
self.plot_mean_values('dds_dn_s')
def plot_dark_spectrograms(self):
fig, axes = plt.subplots(nrows=self.n_darks, sharex=True)
fig.suptitle(self.plottitle)
for i, ax in zip(range(self.n_darks), axes):
self.plot_dark_spectrogram(integration=i, ax=ax)
if i < self.n_darks - 1:
ax.set_xlabel('')
        savename = os.path.join(str(plotfolder), self.plotfname + '_dark_spectrograms.png')
plt.savefig(savename, dpi=150)
def plot_dark_histograms(self, save=False):
fig, ax = plt.subplots()
for i, dark in enumerate(self.dark_dn_s):
ax.hist(dark.ravel(), 100, log=True,
label="dark{}".format(i), alpha=0.5)
plt.legend()
fig.suptitle(self.plottitle)
ax.set_title('Dark histograms, DN / s')
if save:
savename = os.path.join(str(plotfolder), self.plotfname + '_dark_histograms.png')
plt.savefig(savename, dpi=150)
def find_scaling_window(self, spec):
self.spa_slice, self.spe_slice = find_scaling_window(spec)
return self.spa_slice, self.spe_slice
def plot_raw_profile(self, integration=-1, ax=None, log=None,
spatial=None, **kwargs):
return self.plot_some_profile('raw_dn_s', integration,
ax=ax, log=log, spatial=spatial,
**kwargs)
def plot_dark_profile(self, integration=-1, ax=None, log=None):
return self.plot_some_profile('dark_dn_s', integration,
ax=ax, log=log)
def plot_dds_profile(self, integration=-1, ax=None, log=None):
return self.plot_some_profile('dds_dn_s', integration,
ax=ax, log=log)
def get_light_and_dark(self, integration):
light = self.get_integration('raw_dn_s', integration)
dark = self.get_integration('dark_dn_s', integration)
return light, dark
def show_all_darks(self):
fig, axes = plt.subplots(nrows=self.n_darks, sharex=True)
for ax, i_dark in zip(axes.ravel(), range(self.n_darks)):
self.plot_dark_spectrogram(i_dark, ax=ax)
for ax in axes[:-1]:
ax.set_xlabel('')
def profile_all_darks(self):
fig, axes = plt.subplots()
for i, dark in enumerate(self.dark_dn_s):
axes.plot(self.wavelengths[0],
dark.mean(axis=0), label=i, lw=2)
axes.legend(loc='best')
axes.set_xlim((self.wavelengths[0][0],
self.wavelengths[0][-1]))
axes.set_xlabel("Wavelength [nm]")
axes.set_ylabel("DN / s")
axes.set_title("{}\nSpatial mean profiles of darks."
.format(self.plottitle),
fontsize=14)
def plot_spectrum(self, data, integration, spatial=None, ax=None, scale=False,
log=False, spa_average=False, title=None,
**kwargs):
savename = kwargs.pop('savename', False)
spec = data[integration]
if spatial is None:
# if no spatial bin given, take the middle one
spatial = self.spatial_size // 2
if ax is None:
fig, ax = plt.subplots()
fig.suptitle(self.plottitle, fontsize=12)
if log:
func = ax.semilogy
else:
func = ax.plot
if spa_average:
data = spec.mean(axis=0)
else:
data = spec[spatial]
func(self.wavelengths[spatial], data, **kwargs)
ax.set_xlim((self.wavelengths[spatial][0],
self.wavelengths[spatial][-1]))
ax.set_title(title, fontsize=11)
ax.set_xlabel("Wavelength [nm]")
if log:
ax.set_ylabel("log(DN/s)")
else:
ax.set_ylabel('DN/s')
if savename:
ax.get_figure().savefig(savename, dpi=100)
return ax
def get_rectangle(spectogram):
spa_slice, spe_slice = find_scaling_window(spectogram)
xy = spe_slice.start - 0.5, spa_slice.start - 0.5
width = spe_slice.stop - spe_slice.start
height = spa_slice.stop - spa_slice.start
return Rectangle(xy, width, height, fill=False, color='white',
lw=2)
def find_scaling_window(to_filter, size=None):
if size is None:
x = max(to_filter.shape[0] // 5, 2)
y = max(to_filter.shape[1] // 10, 1)
size = (x, y)
filtered = generic_filter(to_filter, np.median, size=size,
mode='constant', cval=to_filter.max() * 100)
min_spa, min_spe = np.unravel_index(filtered.argmin(), to_filter.shape)
spa1 = min_spa - size[0] // 2
if spa1 < 0:
spa1 = 0
spa2 = spa1 + size[0]
if spa2 > to_filter.shape[0]:
spa1 = to_filter.shape[0] - size[0]
spa2 = to_filter.shape[0]
spe1 = min_spe - size[1] // 2
if spe1 < 0:
spe1 = 0
spe2 = spe1 + size[1]
if spe2 > to_filter.shape[1]:
spe1 = to_filter.shape[1] - size[1]
spe2 = to_filter.shape[1]
spa_slice = slice(spa1, spa2)
spe_slice = slice(spe1, spe2)
return (spa_slice, spe_slice)
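# Hedged self-contained sketch: the returned slices match the median-filter
# kernel size (shape[0] // 5 by shape[1] // 10 by default).
def demo_find_scaling_window():
    img = np.random.rand(20, 50)
    spa_slice, spe_slice = find_scaling_window(img)
    print(img[spa_slice, spe_slice].shape)  # -> (4, 5)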
def check_scaling_window_finder(l1b, integration):
to_filter = l1b.get_integration('raw_dn_s', integration)
x = max(to_filter.shape[0] // 10, 1)
y = max(to_filter.shape[1] // 10, 1)
size = (x, y)
print("Img shape:", to_filter.shape)
print("Kernel size:", size)
filtered = generic_filter(to_filter, np.std, size=size,
mode='constant', cval=to_filter.max() * 100)
min_spa, min_spe = np.unravel_index(filtered.argmin(), to_filter.shape)
print("Minimum:", filtered.min())
print("Minimum coords", min_spa, min_spe)
spa1 = min_spa - size[0] // 2
if spa1 < 0:
spa1 = 0
spa2 = spa1 + size[0]
if spa2 > to_filter.shape[0]:
spa1 = to_filter.shape[0] - size[0]
spa2 = to_filter.shape[0]
print("Spatial:", spa1, spa2)
spe1 = min_spe - size[1] // 2
if spe1 < 0:
spe1 = 0
spe2 = spe1 + size[1]
if spe2 > to_filter.shape[1]:
spe1 = to_filter.shape[1] - size[1]
spe2 = to_filter.shape[1]
print("Spectral:", spe1, spe2)
fig, axes = plt.subplots(nrows=3)
axes[0].imshow(np.log(to_filter), cmap=mycmap)
axes[0].add_patch(get_rectangle(to_filter))
axes[1].imshow(np.log(filtered), cmap=mycmap, vmax=0.1)
axes[1].add_patch(get_rectangle(to_filter))
axes[2].hist(filtered[~np.isnan(filtered)].ravel(), bins=100)
def some_file(level, pattern):
try:
fname = get_filenames(level, pattern=pattern, iterator=False)[0]
    except IndexError:
        print("No {} files found.".format(level))
        return None
    return fname
def some_l1a(pattern=None):
return L1AReader(some_file('l1a', pattern))
def some_l1b(pattern=None):
return L1BReader(some_file('l1b', pattern))
| isc |
Sentient07/scikit-learn | sklearn/metrics/cluster/__init__.py | 91 | 1468 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import fowlkes_mallows_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .unsupervised import calinski_harabaz_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"fowlkes_mallows_score", "entropy", "silhouette_samples",
"silhouette_score", "calinski_harabaz_score", "consensus_score"]
| bsd-3-clause |
mne-tools/mne-python | mne/decoding/csp.py | 4 | 35050 | # -*- coding: utf-8 -*-
# Authors: Romain Trachel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Alexandre Barachant <[email protected]>
# Clemens Brunner <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
import copy as cp
import numpy as np
from .base import BaseEstimator
from .mixin import TransformerMixin
from ..cov import _regularized_covariance
from ..fixes import pinv
from ..utils import fill_doc, _check_option, _validate_type
@fill_doc
class CSP(TransformerMixin, BaseEstimator):
"""M/EEG signal decomposition using the Common Spatial Patterns (CSP).
This class can be used as a supervised decomposition to estimate spatial
filters for feature extraction. CSP in the context of EEG was first
described in :footcite:`KolesEtAl1990`; a comprehensive tutorial on CSP can
be found in :footcite:`BlankertzEtAl2008`. Multi-class solving is
implemented from :footcite:`Grosse-WentrupBuss2008`.
Parameters
----------
n_components : int (default 4)
The number of components to decompose M/EEG signals. This number should
be set by cross-validation.
reg : float | str | None (default None)
If not None (same as ``'empirical'``, default), allow regularization
for covariance estimation. If float (between 0 and 1), shrinkage is
used. For str values, ``reg`` will be passed as ``method`` to
:func:`mne.compute_covariance`.
log : None | bool (default None)
If ``transform_into`` equals ``'average_power'`` and ``log`` is None or
True, then apply a log transform to standardize features, else features
are z-scored. If ``transform_into`` is ``'csp_space'``, ``log`` must be
None.
cov_est : 'concat' | 'epoch' (default 'concat')
If ``'concat'``, covariance matrices are estimated on concatenated
epochs for each class. If ``'epoch'``, covariance matrices are
estimated on each epoch separately and then averaged over each class.
transform_into : 'average_power' | 'csp_space' (default 'average_power')
If 'average_power' then ``self.transform`` will return the average
power of each spatial filter. If ``'csp_space'``, ``self.transform``
will return the data in CSP space.
norm_trace : bool (default False)
Normalize class covariance by its trace. Trace normalization is a step
of the original CSP algorithm :footcite:`KolesEtAl1990` to eliminate
magnitude variations in the EEG between individuals. It is not applied
in more recent work :footcite:`BlankertzEtAl2008`,
:footcite:`Grosse-WentrupBuss2008` and can have a negative impact on
pattern order.
cov_method_params : dict | None
Parameters to pass to :func:`mne.compute_covariance`.
.. versionadded:: 0.16
%(rank_None)s
.. versionadded:: 0.17
component_order : 'mutual_info' | 'alternate' (default 'mutual_info')
If ``'mutual_info'`` order components by decreasing mutual information
(in the two-class case this uses a simplification which orders
components by decreasing absolute deviation of the eigenvalues from 0.5
:footcite:`BarachantEtAl2010`). For the two-class case, ``'alternate'``
orders components by starting with the largest eigenvalue, followed by
the smallest, the second-to-largest, the second-to-smallest, and so on
:footcite:`BlankertzEtAl2008`.
.. versionadded:: 0.21
Attributes
----------
filters_ : ndarray, shape (n_channels, n_channels)
If fit, the CSP components used to decompose the data, else None.
patterns_ : ndarray, shape (n_channels, n_channels)
If fit, the CSP patterns used to restore M/EEG signals, else None.
mean_ : ndarray, shape (n_components,)
If fit, the mean squared power for each component.
std_ : ndarray, shape (n_components,)
If fit, the std squared power for each component.
See Also
--------
mne.preprocessing.Xdawn, SPoC
References
----------
.. footbibliography::
"""
def __init__(self, n_components=4, reg=None, log=None, cov_est='concat',
transform_into='average_power', norm_trace=False,
cov_method_params=None, rank=None,
component_order='mutual_info'):
# Init default CSP
if not isinstance(n_components, int):
raise ValueError('n_components must be an integer.')
self.n_components = n_components
self.rank = rank
self.reg = reg
# Init default cov_est
if not (cov_est == "concat" or cov_est == "epoch"):
raise ValueError("unknown covariance estimation method")
self.cov_est = cov_est
# Init default transform_into
self.transform_into = _check_option('transform_into', transform_into,
['average_power', 'csp_space'])
# Init default log
if transform_into == 'average_power':
if log is not None and not isinstance(log, bool):
raise ValueError('log must be a boolean if transform_into == '
'"average_power".')
else:
if log is not None:
raise ValueError('log must be a None if transform_into == '
'"csp_space".')
self.log = log
_validate_type(norm_trace, bool, 'norm_trace')
self.norm_trace = norm_trace
self.cov_method_params = cov_method_params
self.component_order = _check_option('component_order',
component_order,
('mutual_info', 'alternate'))
def _check_Xy(self, X, y=None):
"""Check input data."""
if not isinstance(X, np.ndarray):
raise ValueError("X should be of type ndarray (got %s)."
% type(X))
if y is not None:
if len(X) != len(y) or len(y) < 1:
raise ValueError('X and y must have the same length.')
if X.ndim < 3:
raise ValueError('X must have at least 3 dimensions.')
def fit(self, X, y):
"""Estimate the CSP decomposition on epochs.
Parameters
----------
X : ndarray, shape (n_epochs, n_channels, n_times)
The data on which to estimate the CSP.
y : array, shape (n_epochs,)
The class for each epoch.
Returns
-------
self : instance of CSP
Returns the modified instance.
"""
self._check_Xy(X, y)
self._classes = np.unique(y)
n_classes = len(self._classes)
if n_classes < 2:
raise ValueError("n_classes must be >= 2.")
if n_classes > 2 and self.component_order == 'alternate':
raise ValueError("component_order='alternate' requires two "
"classes, but data contains {} classes; use "
"component_order='mutual_info' "
"instead.".format(n_classes))
covs, sample_weights = self._compute_covariance_matrices(X, y)
eigen_vectors, eigen_values = self._decompose_covs(covs,
sample_weights)
ix = self._order_components(covs, sample_weights, eigen_vectors,
eigen_values, self.component_order)
eigen_vectors = eigen_vectors[:, ix]
self.filters_ = eigen_vectors.T
self.patterns_ = pinv(eigen_vectors)
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, epoch) for epoch in X])
# compute features (mean power)
X = (X ** 2).mean(axis=2)
# To standardize features
self.mean_ = X.mean(axis=0)
self.std_ = X.std(axis=0)
return self
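    # Hedged usage sketch (`epochs_data` and `labels` are hypothetical arrays
    # of shape (n_epochs, n_channels, n_times) and (n_epochs,)):
    #
    #   csp = CSP(n_components=4, log=True)
    #   features = csp.fit(epochs_data, labels).transform(epochs_data)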
def transform(self, X):
"""Estimate epochs sources given the CSP filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The data.
Returns
-------
X : ndarray
If self.transform_into == 'average_power' then returns the power of
CSP features averaged over time and shape (n_epochs, n_sources)
If self.transform_into == 'csp_space' then returns the data in CSP
space and shape is (n_epochs, n_sources, n_times).
"""
if not isinstance(X, np.ndarray):
raise ValueError("X should be of type ndarray (got %s)." % type(X))
if self.filters_ is None:
raise RuntimeError('No filters available. Please first fit CSP '
'decomposition.')
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, epoch) for epoch in X])
# compute features (mean band power)
if self.transform_into == 'average_power':
X = (X ** 2).mean(axis=2)
log = True if self.log is None else self.log
if log:
X = np.log(X)
else:
X -= self.mean_
X /= self.std_
return X
@fill_doc
def plot_patterns(self, info, components=None, ch_type=None,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=True, scalings=None, units='a.u.', res=64,
size=1, cbar_fmt='%3.1f', name_format='CSP%01d',
show=True, show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None,
sphere=None):
"""Plot topographic patterns of components.
The patterns explain how the measured data was generated from the
neural sources (a.k.a. the forward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used for fitting.
If not possible, consider using ``create_info``.
components : float | array of float | None
The patterns to plot. If None, n_components will be shown.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
vmin : float | callable
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
            but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None, 'Reds' is used for all
positive data, otherwise defaults to 'RdBu_r'. If 'interactive',
translates to (None, True). Defaults to 'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
amount of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scalings : dict | float | None
The scalings of the channel types to be applied for plotting.
If None, defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
units : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "CSP%%01d".
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If ``mask`` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
%(topomap_outlines)s
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be
drawn. When an integer, matplotlib ticker locator is used to find
suitable values for the contour thresholds (may sometimes be
inaccurate, use array for accuracy). If an array, the values
represent the levels for the contours. Defaults to 6.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
        (seconds). For example, 0.01 would translate into a window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
%(topomap_sphere_auto)s
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if components is None:
components = np.arange(self.n_components)
# set sampling frequency to have 1 component per time point
info = cp.deepcopy(info)
info['sfreq'] = 1.
# create an evoked
patterns = EvokedArray(self.patterns_.T, info, tmin=0)
        # then call plot_topomap
return patterns.plot_topomap(
times=components, ch_type=ch_type,
vmin=vmin, vmax=vmax, cmap=cmap, colorbar=colorbar, res=res,
cbar_fmt=cbar_fmt, sensors=sensors,
scalings=scalings, units=units, time_unit='s',
time_format=name_format, size=size, show_names=show_names,
title=title, mask_params=mask_params, mask=mask, outlines=outlines,
contours=contours, image_interp=image_interp, show=show,
average=average, sphere=sphere)
@fill_doc
def plot_filters(self, info, components=None, ch_type=None,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=True, scalings=None, units='a.u.', res=64,
size=1, cbar_fmt='%3.1f', name_format='CSP%01d',
show=True, show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None):
"""Plot topographic filters of components.
The filters are used to extract discriminant neural sources from
the measured data (a.k.a. the backward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used for fitting.
If not possible, consider using ``create_info``.
components : float | array of float | None
The patterns to plot. If None, n_components will be shown.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
vmin : float | callable
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None, 'Reds' is used for all
positive data, otherwise defaults to 'RdBu_r'. If 'interactive',
translates to (None, True). Defaults to 'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
            number of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scalings : dict | float | None
The scalings of the channel types to be applied for plotting.
If None, defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
units : dict | str | None
The unit of the channel type used for colorbar label. If
            ``scalings`` is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "CSP%%01d".
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If ``mask`` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
%(topomap_outlines)s
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be
drawn. When an integer, matplotlib ticker locator is used to find
suitable values for the contour thresholds (may sometimes be
inaccurate, use array for accuracy). If an array, the values
represent the levels for the contours. Defaults to 6.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
        (seconds). For example, 0.01 would translate into a window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if components is None:
components = np.arange(self.n_components)
# set sampling frequency to have 1 component per time point
info = cp.deepcopy(info)
info['sfreq'] = 1.
# create an evoked
filters = EvokedArray(self.filters_.T, info, tmin=0)
        # then call plot_topomap
return filters.plot_topomap(
times=components, ch_type=ch_type, vmin=vmin,
vmax=vmax, cmap=cmap, colorbar=colorbar, res=res,
cbar_fmt=cbar_fmt, sensors=sensors, scalings=scalings, units=units,
time_unit='s', time_format=name_format, size=size,
show_names=show_names, title=title, mask_params=mask_params,
mask=mask, outlines=outlines, contours=contours,
image_interp=image_interp, show=show, average=average)
def _compute_covariance_matrices(self, X, y):
_, n_channels, _ = X.shape
if self.cov_est == "concat":
cov_estimator = self._concat_cov
elif self.cov_est == "epoch":
cov_estimator = self._epoch_cov
covs = []
sample_weights = []
for this_class in self._classes:
cov, weight = cov_estimator(X[y == this_class])
if self.norm_trace:
cov /= np.trace(cov)
covs.append(cov)
sample_weights.append(weight)
return np.stack(covs), np.array(sample_weights)
def _concat_cov(self, x_class):
"""Concatenate epochs before computing the covariance."""
_, n_channels, _ = x_class.shape
x_class = np.transpose(x_class, [1, 0, 2])
x_class = x_class.reshape(n_channels, -1)
cov = _regularized_covariance(
x_class, reg=self.reg, method_params=self.cov_method_params,
rank=self.rank)
weight = x_class.shape[0]
return cov, weight
def _epoch_cov(self, x_class):
"""Mean of per-epoch covariances."""
cov = sum(_regularized_covariance(
this_X, reg=self.reg,
method_params=self.cov_method_params,
rank=self.rank) for this_X in x_class)
cov /= len(x_class)
weight = len(x_class)
return cov, weight
def _decompose_covs(self, covs, sample_weights):
from scipy import linalg
n_classes = len(covs)
if n_classes == 2:
eigen_values, eigen_vectors = linalg.eigh(covs[0], covs.sum(0))
else:
# The multiclass case is adapted from
# http://github.com/alexandrebarachant/pyRiemann
eigen_vectors, D = _ajd_pham(covs)
eigen_vectors = self._normalize_eigenvectors(eigen_vectors.T, covs,
sample_weights)
eigen_values = None
return eigen_vectors, eigen_values
def _compute_mutual_info(self, covs, sample_weights, eigen_vectors):
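        # approximate the mutual information between each component and the
        # class label (cf. Grosse-Wentrup & Buss, 2008); used by
        # _order_components when component_order='mutual_info'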
class_probas = sample_weights / sample_weights.sum()
mutual_info = []
for jj in range(eigen_vectors.shape[1]):
aa, bb = 0, 0
for (cov, prob) in zip(covs, class_probas):
tmp = np.dot(np.dot(eigen_vectors[:, jj].T, cov),
eigen_vectors[:, jj])
aa += prob * np.log(np.sqrt(tmp))
bb += prob * (tmp ** 2 - 1)
mi = - (aa + (3.0 / 16) * (bb ** 2))
mutual_info.append(mi)
return mutual_info
def _normalize_eigenvectors(self, eigen_vectors, covs, sample_weights):
# Here we apply an euclidean mean. See pyRiemann for other metrics
mean_cov = np.average(covs, axis=0, weights=sample_weights)
for ii in range(eigen_vectors.shape[1]):
tmp = np.dot(np.dot(eigen_vectors[:, ii].T, mean_cov),
eigen_vectors[:, ii])
eigen_vectors[:, ii] /= np.sqrt(tmp)
return eigen_vectors
def _order_components(self, covs, sample_weights, eigen_vectors,
eigen_values, component_order):
n_classes = len(self._classes)
if component_order == 'mutual_info' and n_classes > 2:
mutual_info = self._compute_mutual_info(covs, sample_weights,
eigen_vectors)
ix = np.argsort(mutual_info)[::-1]
elif component_order == 'mutual_info' and n_classes == 2:
ix = np.argsort(np.abs(eigen_values - 0.5))[::-1]
elif component_order == 'alternate' and n_classes == 2:
i = np.argsort(eigen_values)
ix = np.empty_like(i)
ix[1::2] = i[:len(i) // 2]
ix[0::2] = i[len(i) // 2:][::-1]
return ix
def _ajd_pham(X, eps=1e-6, max_iter=15):
"""Approximate joint diagonalization based on Pham's algorithm.
    This is a direct implementation of Pham's AJD algorithm [1].
Parameters
----------
X : ndarray, shape (n_epochs, n_channels, n_channels)
A set of covariance matrices to diagonalize.
eps : float, default 1e-6
The tolerance for stopping criterion.
    max_iter : int, default 15
        The maximum number of iterations to reach convergence.
Returns
-------
V : ndarray, shape (n_channels, n_channels)
The diagonalizer.
D : ndarray, shape (n_epochs, n_channels, n_channels)
The set of quasi diagonal matrices.
References
----------
.. [1] Pham, Dinh Tuan. "Joint approximate diagonalization of positive
definite Hermitian matrices." SIAM Journal on Matrix Analysis and
Applications 22, no. 4 (2001): 1136-1152.
"""
# Adapted from http://github.com/alexandrebarachant/pyRiemann
n_epochs = X.shape[0]
# Reshape input matrix
A = np.concatenate(X, axis=0).T
# Init variables
n_times, n_m = A.shape
V = np.eye(n_times)
epsilon = n_times * (n_times - 1) * eps
for it in range(max_iter):
decr = 0
for ii in range(1, n_times):
for jj in range(ii):
Ii = np.arange(ii, n_m, n_times)
Ij = np.arange(jj, n_m, n_times)
c1 = A[ii, Ii]
c2 = A[jj, Ij]
g12 = np.mean(A[ii, Ij] / c1)
g21 = np.mean(A[ii, Ij] / c2)
omega21 = np.mean(c1 / c2)
omega12 = np.mean(c2 / c1)
omega = np.sqrt(omega12 * omega21)
tmp = np.sqrt(omega21 / omega12)
tmp1 = (tmp * g12 + g21) / (omega + 1)
tmp2 = (tmp * g12 - g21) / max(omega - 1, 1e-9)
h12 = tmp1 + tmp2
h21 = np.conj((tmp1 - tmp2) / tmp)
decr += n_epochs * (g12 * np.conj(h12) + g21 * h21) / 2.0
tmp = 1 + 1.j * 0.5 * np.imag(h12 * h21)
tmp = np.real(tmp + np.sqrt(tmp ** 2 - h12 * h21))
tau = np.array([[1, -h12 / tmp], [-h21 / tmp, 1]])
A[[ii, jj], :] = np.dot(tau, A[[ii, jj], :])
tmp = np.c_[A[:, Ii], A[:, Ij]]
tmp = np.reshape(tmp, (n_times * n_epochs, 2), order='F')
tmp = np.dot(tmp, tau.T)
tmp = np.reshape(tmp, (n_times, n_epochs * 2), order='F')
A[:, Ii] = tmp[:, :n_epochs]
A[:, Ij] = tmp[:, n_epochs:]
V[[ii, jj], :] = np.dot(tau, V[[ii, jj], :])
if decr < epsilon:
break
D = np.reshape(A, (n_times, -1, n_times)).transpose(1, 0, 2)
return V, D
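# A minimal, hypothetical sketch of calling the diagonalizer above; the input
# must be a stack of symmetric positive-definite matrices:
#
#     rng = np.random.RandomState(0)
#     covs = np.stack([np.cov(rng.randn(4, 100)) for _ in range(3)])
#     V, D = _ajd_pham(covs)
#     # each D[i] is then approximately diagonal (roughly V @ covs[i] @ V.T)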
@fill_doc
class SPoC(CSP):
"""Implementation of the SPoC spatial filtering.
    Source Power Comodulation (SPoC) :footcite:`DahneEtAl2014` allows the
    extraction of spatial filters and patterns by using a target (continuous)
    variable in the decomposition process, giving preference to components
    whose power correlates with the target variable.
SPoC can be seen as an extension of the CSP driven by a continuous
variable rather than a discrete variable. Typical applications include
extraction of motor patterns using EMG power or audio patterns using sound
envelope.
Parameters
----------
n_components : int
The number of components to decompose M/EEG signals.
reg : float | str | None (default None)
If not None (same as ``'empirical'``, default), allow
regularization for covariance estimation.
If float, shrinkage is used (0 <= shrinkage <= 1).
For str options, ``reg`` will be passed to ``method`` to
:func:`mne.compute_covariance`.
log : None | bool (default None)
If transform_into == 'average_power' and log is None or True, then
applies a log transform to standardize the features, else the features
are z-scored. If transform_into == 'csp_space', then log must be None.
transform_into : {'average_power', 'csp_space'}
If 'average_power' then self.transform will return the average power of
each spatial filter. If 'csp_space' self.transform will return the data
in CSP space. Defaults to 'average_power'.
cov_method_params : dict | None
Parameters to pass to :func:`mne.compute_covariance`.
.. versionadded:: 0.16
%(rank_None)s
.. versionadded:: 0.17
Attributes
----------
filters_ : ndarray, shape (n_channels, n_channels)
If fit, the SPoC spatial filters, else None.
patterns_ : ndarray, shape (n_channels, n_channels)
If fit, the SPoC spatial patterns, else None.
mean_ : ndarray, shape (n_components,)
If fit, the mean squared power for each component.
std_ : ndarray, shape (n_components,)
If fit, the std squared power for each component.
See Also
--------
mne.preprocessing.Xdawn, CSP
References
----------
.. footbibliography::
"""
def __init__(self, n_components=4, reg=None, log=None,
transform_into='average_power', cov_method_params=None,
rank=None):
"""Init of SPoC."""
super(SPoC, self).__init__(n_components=n_components, reg=reg, log=log,
cov_est="epoch", norm_trace=False,
transform_into=transform_into, rank=rank,
cov_method_params=cov_method_params)
        # Covariance estimation has to be done on the single-epoch level,
# unlike CSP where covariance estimation can also be achieved through
# concatenation of all epochs from the same class.
delattr(self, 'cov_est')
delattr(self, 'norm_trace')
def fit(self, X, y):
"""Estimate the SPoC decomposition on epochs.
Parameters
----------
X : ndarray, shape (n_epochs, n_channels, n_times)
The data on which to estimate the SPoC.
y : array, shape (n_epochs,)
The class for each epoch.
Returns
-------
self : instance of SPoC
Returns the modified instance.
"""
from scipy import linalg
self._check_Xy(X, y)
if len(np.unique(y)) < 2:
raise ValueError("y must have at least two distinct values.")
# The following code is directly copied from pyRiemann
# Normalize target variable
target = y.astype(np.float64)
target -= target.mean()
target /= target.std()
n_epochs, n_channels = X.shape[:2]
# Estimate single trial covariance
covs = np.empty((n_epochs, n_channels, n_channels))
for ii, epoch in enumerate(X):
covs[ii] = _regularized_covariance(
epoch, reg=self.reg, method_params=self.cov_method_params,
rank=self.rank)
C = covs.mean(0)
Cz = np.mean(covs * target[:, np.newaxis, np.newaxis], axis=0)
# solve eigenvalue decomposition
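        # (generalized problem Cz @ w = lambda * C @ w: filters whose output
        # power covaries most strongly, positively or negatively, with the
        # normalized target receive the largest |lambda|)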
evals, evecs = linalg.eigh(Cz, C)
evals = evals.real
evecs = evecs.real
# sort vectors
ix = np.argsort(np.abs(evals))[::-1]
# sort eigenvectors
evecs = evecs[:, ix].T
# spatial patterns
self.patterns_ = linalg.pinv(evecs).T # n_channels x n_channels
self.filters_ = evecs # n_channels x n_channels
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, epoch) for epoch in X])
# compute features (mean band power)
X = (X ** 2).mean(axis=-1)
# To standardize features
self.mean_ = X.mean(axis=0)
self.std_ = X.std(axis=0)
return self
def transform(self, X):
"""Estimate epochs sources given the SPoC filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The data.
Returns
-------
X : ndarray
If self.transform_into == 'average_power' then returns the power of
CSP features averaged over time and shape (n_epochs, n_sources)
If self.transform_into == 'csp_space' then returns the data in CSP
space and shape is (n_epochs, n_sources, n_times).
"""
return super(SPoC, self).transform(X)
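# A minimal usage sketch with hypothetical shapes (X holds epochs, y is a
# continuous per-epoch target such as EMG power):
#
#     rng = np.random.RandomState(42)
#     X = rng.randn(50, 8, 200)     # (n_epochs, n_channels, n_times)
#     y = rng.randn(50)             # continuous target
#     features = SPoC(n_components=2).fit(X, y).transform(X)   # shape (50, 2)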
| bsd-3-clause |
renaud/exposure | exposure.py | 1 | 3840 | #!/usr/bin/env python
import gc, sys, os
from tornado import ioloop, web, autoreload, template
import simplejson as json
import pandas as pd
def _find_name(obj):
frame = sys._getframe()
for frame in iter(lambda: frame.f_back, None):
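        # touching f_locals syncs each frame's fast locals into a real dict,
        # so gc.get_referrers below can see the names bound to obj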
frame.f_locals
result = []
for referrer in gc.get_referrers(obj):
if isinstance(referrer, dict):
for k, v in referrer.iteritems():
if v is obj:
result.append(k)
return result[0]
INDEX_TEMPLATE = template.Template('''
<html><head>
<title>Exposure</title>
<link rel="stylesheet" href="//netdna.bootstrapcdn.com/bootstrap/3.1.1/css/bootstrap.min.css">
</head>
<body>
<div class="container">
<h2>Exposure</h2><p>overview of exposed objects:</p>
<table class="table table-bordered table-hover">
<thead><tr><th>Exposed object</th><th>Type</th></tr></thead>
{% for obj in objects %}
{% block obj %}
<tr>
<td><a href="/ex/{{ escape(obj['name']) }}">{{ escape(obj["name"]) }}</a></td>
          <td>{{ escape(obj["type"]) }}</td>
</tr>
{% end %}
{% end %}
</table>
</div>
</body></html>''')
class Exposure(object):
"""Easily expose your python objects as a read-only REST service"""
def __init__(self, host='localhost', port=8888, static_files='static', objects=[]):
self.host = host
self.port = int(port)
self.url = 'http://{}:{}'.format(host, port)
self.static_files = str(static_files)
Exposure.exposed = { _find_name(obj):obj for obj in objects }
def add(self, obj, name=None):
'''adds an object to be exposed'''
if not name:
name = _find_name(obj) # guess
print 'ADD: {}::{}'.format(name, obj)
Exposure.exposed[name] = obj
class MainHandler(web.RequestHandler):
''' serves index.html'''
def get(self):
objects = [{'name': name, 'type': type(obj).__name__} for name, obj in Exposure.exposed.iteritems()]
self.finish(INDEX_TEMPLATE.generate(objects=objects))
class ExposureHandler(web.RequestHandler):
'''exposes objects'''
def get(self, obj_name):
obj = Exposure.exposed[obj_name]
if type(obj) in [pd.Series, pd.DataFrame]:
self.write(obj.to_json())
else:
self.write(json.dumps(obj, use_decimal=True))
class ExposureQueryHandler(web.RequestHandler):
'''exposes queries to objects'''
def get(self, obj_name, query):
try:
obj = Exposure.exposed[obj_name]
if type(obj) in [tuple, list, dict, pd.Series]:
if type(obj) in [tuple, list, pd.Series]:
query = int(query)
self.write(json.dumps(obj[query], use_decimal=True))
elif type(obj) is pd.DataFrame:
self.write(obj.query(query).to_json())
except Exception, e:
self.set_status(400)
            self.write(json.dumps(str(e)))
def start(self):
'''start REST server'''
application = web.Application([
(r'/', Exposure.MainHandler),
(r'/ex/(.*)/(.*)', Exposure.ExposureQueryHandler),
(r'/ex/(.*)', Exposure.ExposureHandler),
(r'/static/(.*)', web.StaticFileHandler, {'path': self.static_files+'/'})
], gzip=True)
application.listen(self.port)
# autoreload files in static_files directory
autoreload.start()
for dir, _, files in os.walk(self.static_files):
[autoreload.watch(dir + '/' + f) for f in files if not f.startswith('.')]
print 'listening on {} ...'.format(self.port)
ioloop.IOLoop.instance().start()
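# Minimal usage sketch (hypothetical objects; Python 2 with tornado,
# simplejson and pandas installed):
#
#     df = pd.DataFrame({'a': [1, 2, 3]})
#     ex = Exposure(port=8888)
#     ex.add(df, name='df')
#     ex.start()   # GET /ex/df returns the frame as JSON; /ex/df/<query> filters it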
| apache-2.0 |
TomAugspurger/pandas | pandas/tests/extension/arrow/test_bool.py | 1 | 2766 | import numpy as np
import pytest
from pandas.compat import PY37
import pandas as pd
import pandas._testing as tm
from pandas.tests.extension import base
pytest.importorskip("pyarrow", minversion="0.13.0")
from .arrays import ArrowBoolArray, ArrowBoolDtype # isort:skip
@pytest.fixture
def dtype():
return ArrowBoolDtype()
@pytest.fixture
def data():
values = np.random.randint(0, 2, size=100, dtype=bool)
values[1] = ~values[0]
return ArrowBoolArray.from_scalars(values)
@pytest.fixture
def data_missing():
return ArrowBoolArray.from_scalars([None, True])
class BaseArrowTests:
pass
class TestDtype(BaseArrowTests, base.BaseDtypeTests):
def test_array_type_with_arg(self, data, dtype):
pytest.skip("GH-22666")
class TestInterface(BaseArrowTests, base.BaseInterfaceTests):
def test_copy(self, data):
# __setitem__ does not work, so we only have a smoke-test
data.copy()
def test_view(self, data):
# __setitem__ does not work, so we only have a smoke-test
data.view()
class TestConstructors(BaseArrowTests, base.BaseConstructorsTests):
def test_from_dtype(self, data):
pytest.skip("GH-22666")
# seems like some bug in isna on empty BoolArray returning floats.
@pytest.mark.xfail(reason="bad is-na for empty data")
def test_from_sequence_from_cls(self, data):
super().test_from_sequence_from_cls(data)
@pytest.mark.skipif(not PY37, reason="timeout on Linux py36_locale")
@pytest.mark.xfail(reason="pa.NULL is not recognised as scalar, GH-33899")
def test_series_constructor_no_data_with_index(self, dtype, na_value):
# pyarrow.lib.ArrowInvalid: only handle 1-dimensional arrays
super().test_series_constructor_no_data_with_index(dtype, na_value)
@pytest.mark.skipif(not PY37, reason="timeout on Linux py36_locale")
@pytest.mark.xfail(reason="pa.NULL is not recognised as scalar, GH-33899")
def test_series_constructor_scalar_na_with_index(self, dtype, na_value):
# pyarrow.lib.ArrowInvalid: only handle 1-dimensional arrays
super().test_series_constructor_scalar_na_with_index(dtype, na_value)
@pytest.mark.xfail(reason="raises AssertionError")
def test_construct_empty_dataframe(self, dtype):
super().test_construct_empty_dataframe(dtype)
class TestReduce(base.BaseNoReduceTests):
def test_reduce_series_boolean(self):
pass
class TestReduceBoolean(base.BaseBooleanReduceTests):
pass
def test_is_bool_dtype(data):
assert pd.api.types.is_bool_dtype(data)
assert pd.core.common.is_bool_indexer(data)
s = pd.Series(range(len(data)))
result = s[data]
expected = s[np.asarray(data)]
tm.assert_series_equal(result, expected)
| bsd-3-clause |
herilalaina/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
mathemage/h2o-3 | h2o-docs/src/api/data-science-example-1/example-native-pandas-scikit.py | 22 | 2796 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
import sklearn
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import preprocessing
# <codecell>
air_raw = DataFrame.from_csv("allyears_tiny.csv", index_col = False)
print(air_raw.head())
air_raw['RandNum'] = Series(np.random.uniform(size = len(air_raw['Origin'])))
print(air_raw.head())
# <codecell>
air_mapped = DataFrame()
air_mapped['RandNum'] = air_raw['RandNum']
air_mapped['IsDepDelayed'] = air_raw['IsDepDelayed']
air_mapped['IsDepDelayedInt'] = air_mapped.apply(lambda row:
1 if row['IsDepDelayed'] == 'YES' else 0,
axis=1)
del air_mapped['IsDepDelayed']
print(air_mapped.shape)
lb_origin = sklearn.preprocessing.LabelBinarizer()
lb_origin.fit(air_raw['Origin'])
tmp_origin = lb_origin.transform(air_raw['Origin'])
tmp_origin_df = DataFrame(tmp_origin)
print(tmp_origin_df.shape)
lb_dest = sklearn.preprocessing.LabelBinarizer()
lb_dest.fit(air_raw['Dest'])
tmp_dest = lb_dest.transform(air_raw['Dest'])
tmp_dest_df = DataFrame(tmp_dest)
print(tmp_dest_df.shape)
lb_uniquecarrier = sklearn.preprocessing.LabelBinarizer()
lb_uniquecarrier.fit(air_raw['UniqueCarrier'])
tmp_uniquecarrier = lb_uniquecarrier.transform(air_raw['UniqueCarrier'])
tmp_uniquecarrier_df = DataFrame(tmp_uniquecarrier)
print(tmp_uniquecarrier_df.shape)
air_mapped = pd.concat([
air_mapped,
tmp_origin_df,
tmp_dest_df,
air_raw['Distance'],
tmp_uniquecarrier_df,
air_raw['Month'],
air_raw['DayofMonth'],
air_raw['DayOfWeek'],
],
axis=1)
print(air_mapped.shape)
air_mapped
air = air_mapped
# <codecell>
air_train = air.ix[air['RandNum'] <= 0.8]
# air_valid = air.ix[(air['RandNum'] > 0.8) & (air['RandNum'] <= 0.9)]
air_test = air.ix[air['RandNum'] > 0.9]
print(air_train.shape)
print(air_test.shape)
# <codecell>
X_train = air_train.copy(deep=True)
del X_train['RandNum']
del X_train['IsDepDelayedInt']
print(list(X_train.columns.values))
print(X_train.shape)
y_train = air_train['IsDepDelayedInt']
print(y_train.shape)
# <codecell>
clf = GradientBoostingClassifier(n_estimators = 10, max_depth = 3, learning_rate = 0.01)
clf.fit(X_train, y_train)
# <codecell>
X_test = air_test.copy(deep=True)
del X_test['RandNum']
del X_test['IsDepDelayedInt']
print(list(X_test.columns.values))
print(X_test.shape)
print("")
print("--- PREDICTIONS ---")
print("")
pred = clf.predict(X_test)
print(pred)
| apache-2.0 |
criffy/aflengine | tipengine/tipengine.py | 1 | 4034 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 20:34:20 2020
@author: chris
"""
from resources.team_name_map import team_name_map
import pandas as pd
from os.path import (
dirname,
abspath
)
import math
home_dir = dirname(dirname(abspath(__file__)))
matches = pd.read_csv(home_dir + "/bench/matches.csv")
def standardise_teams(fixture: pd.DataFrame):
"""
Update team names to match those used throughout repo
"""
for idx,row in fixture.iterrows():
home = row['hometeam']
away = row['awayteam']
if home in team_name_map:
fixture.at[idx,'hometeam'] = team_name_map[home]
if away in team_name_map:
fixture.at[idx,'awayteam'] = team_name_map[away]
return fixture
def get_winner(homescore,awayscore):
if homescore > awayscore:
return 'H'
elif homescore < awayscore:
return 'A'
else:
return 'D'
def format_fixture(fixture: pd.DataFrame):
"""
Change the layout of the fixture as it comes in from the web
"""
fixture.rename(columns={"Round Number": "round", "Date": "date", \
"Location": "venue", "Home Team": "hometeam", "Away Team": "awayteam"}, \
inplace = True)
fixture['homescore'] = None
fixture['awayscore'] = None
fixture['winner'] = None
for idx, row in fixture.iterrows():
try:
if math.isnan(row['Result']):
break
except TypeError:
            fixture.at[idx,'homescore'] = str(row['Result']).split(' - ')[0]
            fixture.at[idx,'awayscore'] = str(row['Result']).split(' - ')[1]
            # compare scores numerically so e.g. 9 vs 10 is ordered correctly
            fixture.at[idx,'winner'] = get_winner(int(str(row['Result']).split(' - ')[0]), int(str(row['Result']).split(' - ')[1]))
fixture.drop('Result',axis=1,inplace=True)
return fixture
def fill_fixture_scores(fixture: pd.DataFrame, round_num: int, matches: pd.DataFrame, season: int = 2020):
"""
Given a round of the season of the fixture, fill out the results of that round
"""
fixture_round = fixture[fixture['round'] == str(round_num)]
for idx, row in fixture_round.iterrows():
match = matches[(matches['hteam'] == row['hometeam']) \
& (matches['ateam'] == row['awayteam']) \
& (matches['season'] == season) & (matches['round'] == str(round_num))]
fixture.at[idx,'homescore'] = match['hscore']
fixture.at[idx,'awayscore'] = match['ascore']
if match['hscore'].iloc[0] > match['ascore'].iloc[0]:
fixture.at[idx,'winner'] = 'H'
elif match['hscore'].iloc[0] < match['ascore'].iloc[0]:
fixture.at[idx,'winner'] = 'A'
else:
fixture.at[idx,'winner'] = 'D'
return fixture
def get_total_tips(tally: pd.DataFrame):
"""
Aggregate season tips for each tipster
"""
tally['total'] = None
for idx,row in tally.iterrows():
        tally.at[idx,'total'] = row.drop('total').sum()  # exclude the not-yet-filled 'total' column
tally.sort_values(by='total', inplace=True, ascending=False)
return tally
fixture2020 = pd.read_csv('fixture2020_original.csv')
fixture2020 = format_fixture(fixture2020)
tips2020 = pd.read_csv('tips2020.csv')
tally2020 = pd.read_csv('tally2020.csv',index_col='tipper')
rnd = "2"
tippers = tally2020.index.values
fixture2020['key'] = fixture2020['round'].astype(str) + fixture2020['hometeam'] + fixture2020['awayteam']
fixture2020 = standardise_teams(fixture2020)
fixture2020 = fill_fixture_scores(fixture2020, round_num=1, matches=matches)
tips2020['key'] = tips2020['round'] + tips2020['hometeam'] + tips2020['awayteam']
combined = fixture2020.merge(tips2020,on='key',how='inner')
for tipper in tippers:
score = 0
games = combined.loc[combined['round_x'] == rnd]
for index, row in games.iterrows():
if(row[tipper] == row['winner']):
score += 1
tally2020.at[tipper,"r"+rnd] = score
fixture2020.drop('key',inplace=True,axis=1)
tally2020 = get_total_tips(tally2020)
fixture2020.to_csv('fixture2020.csv')
tally2020.to_csv('tally2020.csv') | gpl-3.0 |
JPFrancoia/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
serendio-labs-stage/diskoveror-datapreprocessing-python | premodelling routines/dummyvariable/createiv.py | 3 | 1653 | '''
Copyright 2015 Serendio Inc.
Author - kshitij soni
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
'''
from numpy import mean
import numpy as np
import pandas as pd
import csv
from collections import defaultdict
from pandas import DataFrame, Series
from StringIO import StringIO
import scipy
import matplotlib.pyplot
import matplotlib.pyplot as plt
import math as mt
import scipy.stats as stats
def createiv(str,list,matrix=0,zero=1):
s=list
w = pd.read_csv(str)
frame = DataFrame(w)
if matrix==1:
dummy_ranks = pd.get_dummies(w[s], prefix=s)
dummy_ranks[s]=w[s]
print dummy_ranks
if zero==1:
for i in range(0,len(s)):
sg = s[i]
t=np.unique(w[sg])
f=len(w)
df_ = pd.DataFrame(columns=t,index=np.arange(f))
df_ = df_.fillna(0)
length=len(t)
count =0
for j in w[sg]:
for i in range(length):
if j == t[i]:
df_.at[count,t[i]]=1
count = count + 1
df_=df_.drop(df_.columns[0],axis=1)
print df_
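# Usage sketch (hypothetical file/column names): one 0/1 indicator column is
# created per level of each listed categorical column:
#
#     createiv('survey.csv', ['gender', 'region'], matrix=1, zero=1)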
| apache-2.0 |
WangWenjun559/Weiss | summary/sumy/sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| apache-2.0 |
Quantipy/quantipy | quantipy/core/tools/dp/dimensions/dimlabels.py | 1 | 2376 | #-*- coding: utf-8 -*-
"""
Created on 21 Nov 2017
"""
import json
import pandas as pd
import numpy as np
import quantipy as qp
qp_dim_languages = {
'en-GB': 'ENG',
'sv-SE': 'SVE',
'da-DK': 'DAN',
'fi-FI': 'FIN',
'nb-NO': 'NOR',
'de-DE': 'DEU',
'fr-FR': 'FRA',
'zh-CN': 'CHS',
'id-ID': 'IND',
'ms-MY': 'MSL',
'th-TH': 'THA'
}
class DimLabels():
"""
"""
def __init__(self, name, text_key='en-GB'):
self.name = name
self.text_key = text_key
self.text = {}
self.labels = []
self.incl_languages = []
self.incl_labeltypes = []
def add_text(self, text_object, replace=True):
if isinstance(text_object, (str, unicode)):
text_object = {self.text_key: text_object}
self.text = text_object
self.labels_from_text(replace)
self._lang_ltype_from_label(replace)
return None
def _lang_ltype_from_label(self, replace=True):
if replace:
self.incl_languages = []
self.incl_labeltypes = []
for lab in self.labels:
if not lab.language in self.incl_languages:
self.incl_languages.append(lab.language)
if lab.labeltype and not lab.labeltype in self.incl_labeltypes:
self.incl_labeltypes.append(lab.labeltype)
return None
def labels_from_text(self, replace=True):
if replace: self.labels = []
for item in self.text.items():
if isinstance(item[1], dict):
for e_item in item[1].items():
dimlabel = DimLabel(e_item, item[0], self.text_key)
if not self._label_exists(dimlabel):
self.labels.append(dimlabel)
else:
dimlabel = DimLabel(item, None, self.text_key)
if not self._label_exists(dimlabel):
self.labels.append(dimlabel)
return None
def _label_exists(self, label):
return any(d_l.language == label.language and
d_l.labeltype == label.labeltype
for d_l in self.labels)
class DimLabel():
"""
"""
def __init__(self, text=None, edit=None, text_key=None):
self.text = ''
self.language = ''
self.default_lan = qp_dim_languages.get(text_key, 'ENG')
self.labeltype = None
if text:
self.to_dim(text, edit)
def to_dim(self, text, edit=None):
if isinstance(text, (str, unicode)):
self.language = self.default_lan
self.text = text
else:
self.language = qp_dim_languages.get(text[0], 'ENG')
self.text = text[1]
self.text = self.text.replace('\n', ' ').replace('"', '')
self.labeltype = edit
return None
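# Usage sketch (hypothetical label texts): one DimLabel is built per language
# key found in the text object:
#
#     dl = DimLabels('q1', text_key='en-GB')
#     dl.add_text({'en-GB': 'Gender', 'de-DE': 'Geschlecht'})
#     # dl.labels now holds DimLabel objects for 'ENG' and 'DEU'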
| mit |
alfonsokim/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qt4.py | 69 | 20664 | from __future__ import division
import math
import os
import sys
import matplotlib
from matplotlib import verbose
from matplotlib.cbook import is_string_like, onetrue
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, IdleEvent, cursors
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.widgets import SubplotTool
try:
from PyQt4 import QtCore, QtGui, Qt
except ImportError:
raise ImportError("Qt4 backend requires that PyQt4 is installed.")
backend_version = "0.9.1"
def fn_name(): return sys._getframe(1).f_code.co_name
DEBUG = False
cursord = {
cursors.MOVE : QtCore.Qt.SizeAllCursor,
cursors.HAND : QtCore.Qt.PointingHandCursor,
cursors.POINTER : QtCore.Qt.ArrowCursor,
cursors.SELECT_REGION : QtCore.Qt.CrossCursor,
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw()
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one.
"""
if QtGui.QApplication.startingUp():
if DEBUG: print "Starting up QApplication"
global qApp
qApp = QtGui.QApplication( [" "] )
QtCore.QObject.connect( qApp, QtCore.SIGNAL( "lastWindowClosed()" ),
qApp, QtCore.SLOT( "quit()" ) )
#remember that matplotlib created the qApp - will be used by show()
_create_qApp.qAppCreatedHere = True
_create_qApp.qAppCreatedHere = False
def show():
"""
Show all the figures and enter the qt main loop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.window.show()
if DEBUG: print 'Inside show'
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw()
if _create_qApp.qAppCreatedHere:
QtGui.qApp.exec_()
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
thisFig = Figure( *args, **kwargs )
canvas = FigureCanvasQT( thisFig )
manager = FigureManagerQT( canvas, num )
return manager
class FigureCanvasQT( QtGui.QWidget, FigureCanvasBase ):
keyvald = { QtCore.Qt.Key_Control : 'control',
QtCore.Qt.Key_Shift : 'shift',
QtCore.Qt.Key_Alt : 'alt',
}
# left 1, middle 2, right 3
buttond = {1:1, 2:3, 4:2}
def __init__( self, figure ):
if DEBUG: print 'FigureCanvasQt: ', figure
_create_qApp()
QtGui.QWidget.__init__( self )
FigureCanvasBase.__init__( self, figure )
self.figure = figure
self.setMouseTracking( True )
# hide until we can test and fix
#self.startTimer(backend_IdleEvent.milliseconds)
w,h = self.get_width_height()
self.resize( w, h )
def __timerEvent(self, event):
# hide until we can test and fix
self.mpl_idle_event(event)
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, event)
def leaveEvent(self, event):
FigureCanvasBase.leave_notify_event(self, event)
def mousePressEvent( self, event ):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond[event.button()]
FigureCanvasBase.button_press_event( self, x, y, button )
if DEBUG: print 'button pressed:', event.button()
def mouseMoveEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
FigureCanvasBase.motion_notify_event( self, x, y )
#if DEBUG: print 'mouse move'
def mouseReleaseEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
button = self.buttond[event.button()]
FigureCanvasBase.button_release_event( self, x, y, button )
if DEBUG: print 'button released'
def keyPressEvent( self, event ):
key = self._get_key( event )
FigureCanvasBase.key_press_event( self, key )
if DEBUG: print 'key press', key
def keyReleaseEvent( self, event ):
key = self._get_key(event)
FigureCanvasBase.key_release_event( self, key )
if DEBUG: print 'key release', key
def resizeEvent( self, event ):
if DEBUG: print 'resize (%d x %d)' % (event.size().width(), event.size().height())
QtGui.QWidget.resizeEvent( self, event )
w = event.size().width()
h = event.size().height()
if DEBUG: print "FigureCanvasQtAgg.resizeEvent(", w, ",", h, ")"
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches( winch, hinch )
self.draw()
def resize( self, w, h ):
# Pass through to Qt to resize the widget.
QtGui.QWidget.resize( self, w, h )
# Resize the figure by converting pixels to inches.
pixelPerInch = self.figure.dpi
wInch = w / pixelPerInch
hInch = h / pixelPerInch
self.figure.set_size_inches( wInch, hInch )
# Redraw everything.
self.draw()
def sizeHint( self ):
w, h = self.get_width_height()
return QtCore.QSize( w, h )
    def minimumSizeHint( self ):
return QtCore.QSize( 10, 10 )
def _get_key( self, event ):
if event.key() < 256:
key = str(event.text())
elif event.key() in self.keyvald:
key = self.keyvald[ event.key() ]
else:
key = None
return key
def flush_events(self):
Qt.qApp.processEvents()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerQT( FigureManagerBase ):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The qt.QToolBar
window : The qt.QMainWindow
"""
def __init__( self, canvas, num ):
if DEBUG: print 'FigureManagerQT.%s' % fn_name()
FigureManagerBase.__init__( self, canvas, num )
self.canvas = canvas
self.window = QtGui.QMainWindow()
self.window.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.window.setWindowTitle("Figure %d" % num)
image = os.path.join( matplotlib.rcParams['datapath'],'images','matplotlib.png' )
self.window.setWindowIcon(QtGui.QIcon( image ))
# Give the keyboard focus to the figure instead of the manager
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
QtCore.QObject.connect( self.window, QtCore.SIGNAL( 'destroyed()' ),
self._widgetclosed )
self.window._destroying = False
self.toolbar = self._get_toolbar(self.canvas, self.window)
self.window.addToolBar(self.toolbar)
QtCore.QObject.connect(self.toolbar, QtCore.SIGNAL("message"),
self.window.statusBar().showMessage)
self.window.setCentralWidget(self.canvas)
if matplotlib.is_interactive():
self.window.show()
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.window.show()
def notify_axes_change( fig ):
# This will be called whenever the current axes is changed
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver( notify_axes_change )
def _widgetclosed( self ):
if self.window._destroying: return
self.window._destroying = True
Gcf.destroy(self.num)
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'classic':
print "Classic toolbar is not supported"
elif matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent, False)
else:
toolbar = None
return toolbar
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height)
def destroy( self, *args ):
if self.window._destroying: return
self.window._destroying = True
QtCore.QObject.disconnect( self.window, QtCore.SIGNAL( 'destroyed()' ),
self._widgetclosed )
if self.toolbar: self.toolbar.destroy()
if DEBUG: print "destroy figure manager"
self.window.close()
def set_window_title(self, title):
self.window.setWindowTitle(title)
class NavigationToolbar2QT( NavigationToolbar2, QtGui.QToolBar ):
def __init__(self, canvas, parent, coordinates=True):
""" coordinates: should we show the coordinates on the right? """
self.canvas = canvas
self.coordinates = coordinates
QtGui.QToolBar.__init__( self, parent )
NavigationToolbar2.__init__( self, canvas )
def _icon(self, name):
return QtGui.QIcon(os.path.join(self.basedir, name))
def _init_toolbar(self):
self.basedir = os.path.join(matplotlib.rcParams[ 'datapath' ],'images')
a = self.addAction(self._icon('home.svg'), 'Home', self.home)
a.setToolTip('Reset original view')
a = self.addAction(self._icon('back.svg'), 'Back', self.back)
a.setToolTip('Back to previous view')
a = self.addAction(self._icon('forward.svg'), 'Forward', self.forward)
a.setToolTip('Forward to next view')
self.addSeparator()
a = self.addAction(self._icon('move.svg'), 'Pan', self.pan)
a.setToolTip('Pan axes with left mouse, zoom with right')
a = self.addAction(self._icon('zoom_to_rect.svg'), 'Zoom', self.zoom)
a.setToolTip('Zoom to rectangle')
self.addSeparator()
a = self.addAction(self._icon('subplots.png'), 'Subplots',
self.configure_subplots)
a.setToolTip('Configure subplots')
a = self.addAction(self._icon('filesave.svg'), 'Save',
self.save_figure)
a.setToolTip('Save the figure')
self.buttons = {}
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
if self.coordinates:
self.locLabel = QtGui.QLabel( "", self )
self.locLabel.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTop )
self.locLabel.setSizePolicy(
QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Ignored))
labelAction = self.addWidget(self.locLabel)
labelAction.setVisible(True)
# reference holder for subplots_adjust window
self.adj_window = None
def dynamic_update( self ):
self.canvas.draw()
def set_message( self, s ):
self.emit(QtCore.SIGNAL("message"), s)
if self.coordinates:
self.locLabel.setText(s.replace(', ', '\n'))
def set_cursor( self, cursor ):
if DEBUG: print 'Set cursor' , cursor
QtGui.QApplication.restoreOverrideCursor()
QtGui.QApplication.setOverrideCursor( QtGui.QCursor( cursord[cursor] ) )
def draw_rubberband( self, event, x0, y0, x1, y1 ):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [ int(val) for val in (min(x0, x1), min(y0, y1), w, h) ]
self.canvas.drawRectangle( rect )
def configure_subplots(self):
self.adj_window = QtGui.QMainWindow()
win = self.adj_window
win.setAttribute(QtCore.Qt.WA_DeleteOnClose)
win.setWindowTitle("Subplot Configuration Tool")
image = os.path.join( matplotlib.rcParams['datapath'],'images','matplotlib.png' )
win.setWindowIcon(QtGui.QIcon( image ))
tool = SubplotToolQt(self.canvas.figure, win)
win.setCentralWidget(tool)
win.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
win.show()
def _get_canvas(self, fig):
return FigureCanvasQT(fig)
def save_figure( self ):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
default_filetype = self.canvas.get_default_filetype()
start = "image." + default_filetype
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname = QtGui.QFileDialog.getSaveFileName(
self, "Choose a filename to save to", start, filters, selectedFilter)
if fname:
try:
self.canvas.print_figure( unicode(fname) )
except Exception, e:
QtGui.QMessageBox.critical(
self, "Error saving file", str(e),
QtGui.QMessageBox.Ok, QtGui.QMessageBox.NoButton)
class SubplotToolQt( SubplotTool, QtGui.QWidget ):
def __init__(self, targetfig, parent):
QtGui.QWidget.__init__(self, None)
self.targetfig = targetfig
self.parent = parent
self.sliderleft = QtGui.QSlider(QtCore.Qt.Horizontal)
self.sliderbottom = QtGui.QSlider(QtCore.Qt.Vertical)
self.sliderright = QtGui.QSlider(QtCore.Qt.Horizontal)
self.slidertop = QtGui.QSlider(QtCore.Qt.Vertical)
self.sliderwspace = QtGui.QSlider(QtCore.Qt.Horizontal)
self.sliderhspace = QtGui.QSlider(QtCore.Qt.Vertical)
# constraints
QtCore.QObject.connect( self.sliderleft,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderright.setMinimum )
QtCore.QObject.connect( self.sliderright,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderleft.setMaximum )
QtCore.QObject.connect( self.sliderbottom,
QtCore.SIGNAL( "valueChanged(int)" ),
self.slidertop.setMinimum )
QtCore.QObject.connect( self.slidertop,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderbottom.setMaximum )
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace, )
adjustments = ('left:', 'bottom:', 'right:', 'top:', 'wspace:', 'hspace:')
for slider, adjustment in zip(sliders, adjustments):
slider.setMinimum(0)
slider.setMaximum(1000)
slider.setSingleStep(5)
layout = QtGui.QGridLayout()
leftlabel = QtGui.QLabel('left')
layout.addWidget(leftlabel, 2, 0)
layout.addWidget(self.sliderleft, 2, 1)
toplabel = QtGui.QLabel('top')
layout.addWidget(toplabel, 0, 2)
layout.addWidget(self.slidertop, 1, 2)
layout.setAlignment(self.slidertop, QtCore.Qt.AlignHCenter)
bottomlabel = QtGui.QLabel('bottom')
layout.addWidget(QtGui.QLabel('bottom'), 4, 2)
layout.addWidget(self.sliderbottom, 3, 2)
layout.setAlignment(self.sliderbottom, QtCore.Qt.AlignHCenter)
rightlabel = QtGui.QLabel('right')
layout.addWidget(rightlabel, 2, 4)
layout.addWidget(self.sliderright, 2, 3)
hspacelabel = QtGui.QLabel('hspace')
layout.addWidget(hspacelabel, 0, 6)
layout.setAlignment(hspacelabel, QtCore.Qt.AlignHCenter)
layout.addWidget(self.sliderhspace, 1, 6)
layout.setAlignment(self.sliderhspace, QtCore.Qt.AlignHCenter)
wspacelabel = QtGui.QLabel('wspace')
layout.addWidget(wspacelabel, 4, 6)
layout.setAlignment(wspacelabel, QtCore.Qt.AlignHCenter)
layout.addWidget(self.sliderwspace, 3, 6)
layout.setAlignment(self.sliderwspace, QtCore.Qt.AlignBottom)
layout.setRowStretch(1,1)
layout.setRowStretch(3,1)
layout.setColumnStretch(1,1)
layout.setColumnStretch(3,1)
layout.setColumnStretch(6,1)
self.setLayout(layout)
self.sliderleft.setSliderPosition(int(targetfig.subplotpars.left*1000))
self.sliderbottom.setSliderPosition(\
int(targetfig.subplotpars.bottom*1000))
self.sliderright.setSliderPosition(\
int(targetfig.subplotpars.right*1000))
self.slidertop.setSliderPosition(int(targetfig.subplotpars.top*1000))
self.sliderwspace.setSliderPosition(\
int(targetfig.subplotpars.wspace*1000))
self.sliderhspace.setSliderPosition(\
int(targetfig.subplotpars.hspace*1000))
QtCore.QObject.connect( self.sliderleft,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcleft )
QtCore.QObject.connect( self.sliderbottom,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcbottom )
QtCore.QObject.connect( self.sliderright,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcright )
QtCore.QObject.connect( self.slidertop,
QtCore.SIGNAL( "valueChanged(int)" ),
self.functop )
QtCore.QObject.connect( self.sliderwspace,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcwspace )
QtCore.QObject.connect( self.sliderhspace,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funchspace )
def funcleft(self, val):
if val == self.sliderright.value():
val -= 1
self.targetfig.subplots_adjust(left=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcright(self, val):
if val == self.sliderleft.value():
val += 1
self.targetfig.subplots_adjust(right=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcbottom(self, val):
if val == self.slidertop.value():
val -= 1
self.targetfig.subplots_adjust(bottom=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def functop(self, val):
if val == self.sliderbottom.value():
val += 1
self.targetfig.subplots_adjust(top=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def error_msg_qt( msg, parent=None ):
if not is_string_like( msg ):
msg = ','.join( map( str,msg ) )
QtGui.QMessageBox.warning( None, "Matplotlib", msg, QtGui.QMessageBox.Ok )
def exception_handler( type, value, tb ):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename != None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror != None:
msg += value.strerror
else:
msg += str(value)
if len( msg ) : error_msg_qt( msg )
FigureManager = FigureManagerQT
| agpl-3.0 |
datapythonista/pandas | pandas/tests/io/parser/test_na_values.py | 4 | 15092 | """
Tests that NA values are properly handled during
parsing for all of the parsers defined in parsers.py
"""
from io import StringIO
import numpy as np
import pytest
from pandas._libs.parsers import STR_NA_VALUES
from pandas import (
DataFrame,
Index,
MultiIndex,
)
import pandas._testing as tm
def test_string_nas(all_parsers):
parser = all_parsers
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[["a", "b", "c"], ["d", np.nan, "f"], [np.nan, "g", "h"]],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result, expected)
def test_detect_string_na(all_parsers):
parser = all_parsers
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = DataFrame(
[["foo", "bar"], [np.nan, "baz"], [np.nan, np.nan]], columns=["A", "B"]
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"na_values",
[
["-999.0", "-999"],
[-999, -999.0],
[-999.0, -999],
["-999.0"],
["-999"],
[-999.0],
[-999],
],
)
@pytest.mark.parametrize(
"data",
[
"""A,B
-999,1.2
2,-999
3,4.5
""",
"""A,B
-999,1.200
2,-999.000
3,4.500
""",
],
)
def test_non_string_na_values(all_parsers, data, na_values):
# see gh-3611: with an odd float format, we can't match
# the string "999.0" exactly but still need float matching
parser = all_parsers
expected = DataFrame([[np.nan, 1.2], [2.0, np.nan], [3.0, 4.5]], columns=["A", "B"])
result = parser.read_csv(StringIO(data), na_values=na_values)
tm.assert_frame_equal(result, expected)
def test_default_na_values(all_parsers):
_NA_VALUES = {
"-1.#IND",
"1.#QNAN",
"1.#IND",
"-1.#QNAN",
"#N/A",
"N/A",
"n/a",
"NA",
"<NA>",
"#NA",
"NULL",
"null",
"NaN",
"nan",
"-NaN",
"-nan",
"#N/A N/A",
"",
}
assert _NA_VALUES == STR_NA_VALUES
parser = all_parsers
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ""
elif i > 0:
buf = "".join([","] * i)
buf = f"{buf}{v}"
if i < nv - 1:
joined = "".join([","] * (nv - i - 1))
buf = f"{buf}{joined}"
return buf
data = StringIO("\n".join(f(i, v) for i, v in enumerate(_NA_VALUES)))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
result = parser.read_csv(data, header=None)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("na_values", ["baz", ["baz"]])
def test_custom_na_values(all_parsers, na_values):
parser = all_parsers
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = DataFrame(
[[1.0, np.nan, 3], [np.nan, 5, np.nan], [7, 8, np.nan]], columns=["A", "B", "C"]
)
result = parser.read_csv(StringIO(data), na_values=na_values, skiprows=[1])
tm.assert_frame_equal(result, expected)
def test_bool_na_values(all_parsers):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
{
"A": np.array([True, np.nan, False], dtype=object),
"B": np.array([False, True, np.nan], dtype=object),
"C": [True, False, True],
}
)
tm.assert_frame_equal(result, expected)
def test_na_value_dict(all_parsers):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
parser = all_parsers
df = parser.read_csv(StringIO(data), na_values={"A": ["foo"], "B": ["bar"]})
expected = DataFrame(
{
"A": [np.nan, "bar", np.nan, "bar"],
"B": [np.nan, "foo", np.nan, "foo"],
"C": [np.nan, "foo", np.nan, "foo"],
}
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"index_col,expected",
[
(
[0],
DataFrame({"b": [np.nan], "c": [1], "d": [5]}, index=Index([0], name="a")),
),
(
[0, 2],
DataFrame(
{"b": [np.nan], "d": [5]},
index=MultiIndex.from_tuples([(0, 1)], names=["a", "c"]),
),
),
(
["a", "c"],
DataFrame(
{"b": [np.nan], "d": [5]},
index=MultiIndex.from_tuples([(0, 1)], names=["a", "c"]),
),
),
],
)
def test_na_value_dict_multi_index(all_parsers, index_col, expected):
data = """\
a,b,c,d
0,NA,1,5
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), na_values=set(), index_col=index_col)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,expected",
[
(
{},
DataFrame(
{
"A": ["a", "b", np.nan, "d", "e", np.nan, "g"],
"B": [1, 2, 3, 4, 5, 6, 7],
"C": ["one", "two", "three", np.nan, "five", np.nan, "seven"],
}
),
),
(
{"na_values": {"A": [], "C": []}, "keep_default_na": False},
DataFrame(
{
"A": ["a", "b", "", "d", "e", "nan", "g"],
"B": [1, 2, 3, 4, 5, 6, 7],
"C": ["one", "two", "three", "nan", "five", "", "seven"],
}
),
),
(
{"na_values": ["a"], "keep_default_na": False},
DataFrame(
{
"A": [np.nan, "b", "", "d", "e", "nan", "g"],
"B": [1, 2, 3, 4, 5, 6, 7],
"C": ["one", "two", "three", "nan", "five", "", "seven"],
}
),
),
(
{"na_values": {"A": [], "C": []}},
DataFrame(
{
"A": ["a", "b", np.nan, "d", "e", np.nan, "g"],
"B": [1, 2, 3, 4, 5, 6, 7],
"C": ["one", "two", "three", np.nan, "five", np.nan, "seven"],
}
),
),
],
)
def test_na_values_keep_default(all_parsers, kwargs, expected):
data = """\
A,B,C
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_no_na_values_no_keep_default(all_parsers):
# see gh-4318: passing na_values=None and
    # keep_default_na=False yields "None" as a na_value
data = """\
A,B,C
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), keep_default_na=False)
expected = DataFrame(
{
"A": ["a", "b", "", "d", "e", "nan", "g"],
"B": [1, 2, 3, 4, 5, 6, 7],
"C": ["None", "two", "None", "nan", "five", "", "seven"],
}
)
tm.assert_frame_equal(result, expected)
def test_no_keep_default_na_dict_na_values(all_parsers):
# see gh-19227
data = "a,b\n,2"
parser = all_parsers
result = parser.read_csv(
StringIO(data), na_values={"b": ["2"]}, keep_default_na=False
)
expected = DataFrame({"a": [""], "b": [np.nan]})
tm.assert_frame_equal(result, expected)
def test_no_keep_default_na_dict_na_scalar_values(all_parsers):
# see gh-19227
#
# Scalar values shouldn't cause the parsing to crash or fail.
data = "a,b\n1,2"
parser = all_parsers
df = parser.read_csv(StringIO(data), na_values={"b": 2}, keep_default_na=False)
expected = DataFrame({"a": [1], "b": [np.nan]})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("col_zero_na_values", [113125, "113125"])
def test_no_keep_default_na_dict_na_values_diff_reprs(all_parsers, col_zero_na_values):
# see gh-19227
data = """\
113125,"blah","/blaha",kjsdkj,412.166,225.874,214.008
729639,"qwer","",asdfkj,466.681,,252.373
"""
parser = all_parsers
expected = DataFrame(
{
0: [np.nan, 729639.0],
1: [np.nan, "qwer"],
2: ["/blaha", np.nan],
3: ["kjsdkj", "asdfkj"],
4: [412.166, 466.681],
5: ["225.874", ""],
6: [np.nan, 252.373],
}
)
result = parser.read_csv(
StringIO(data),
header=None,
keep_default_na=False,
na_values={2: "", 6: "214.008", 1: "blah", 0: col_zero_na_values},
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"na_filter,row_data",
[
(True, [[1, "A"], [np.nan, np.nan], [3, "C"]]),
(False, [["1", "A"], ["nan", "B"], ["3", "C"]]),
],
)
def test_na_values_na_filter_override(all_parsers, na_filter, row_data):
data = """\
A,B
1,A
nan,B
3,C
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), na_values=["B"], na_filter=na_filter)
expected = DataFrame(row_data, columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_na_trailing_columns(all_parsers):
parser = all_parsers
data = """Date,Currency,Symbol,Type,Units,UnitPrice,Cost,Tax
2012-03-14,USD,AAPL,BUY,1000
2012-05-12,USD,SBUX,SELL,500"""
# Trailing columns should be all NaN.
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
["2012-03-14", "USD", "AAPL", "BUY", 1000, np.nan, np.nan, np.nan],
["2012-05-12", "USD", "SBUX", "SELL", 500, np.nan, np.nan, np.nan],
],
columns=[
"Date",
"Currency",
"Symbol",
"Type",
"Units",
"UnitPrice",
"Cost",
"Tax",
],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"na_values,row_data",
[
(1, [[np.nan, 2.0], [2.0, np.nan]]),
({"a": 2, "b": 1}, [[1.0, 2.0], [np.nan, np.nan]]),
],
)
def test_na_values_scalar(all_parsers, na_values, row_data):
# see gh-12224
parser = all_parsers
names = ["a", "b"]
data = "1,2\n2,1"
result = parser.read_csv(StringIO(data), names=names, na_values=na_values)
expected = DataFrame(row_data, columns=names)
tm.assert_frame_equal(result, expected)
def test_na_values_dict_aliasing(all_parsers):
parser = all_parsers
na_values = {"a": 2, "b": 1}
na_values_copy = na_values.copy()
names = ["a", "b"]
data = "1,2\n2,1"
expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]], columns=names)
result = parser.read_csv(StringIO(data), names=names, na_values=na_values)
tm.assert_frame_equal(result, expected)
tm.assert_dict_equal(na_values, na_values_copy)
def test_na_values_dict_col_index(all_parsers):
# see gh-14203
data = "a\nfoo\n1"
parser = all_parsers
na_values = {0: "foo"}
result = parser.read_csv(StringIO(data), na_values=na_values)
expected = DataFrame({"a": [np.nan, 1]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
str(2 ** 63) + "\n" + str(2 ** 63 + 1),
{"na_values": [2 ** 63]},
DataFrame([str(2 ** 63), str(2 ** 63 + 1)]),
),
(str(2 ** 63) + ",1" + "\n,2", {}, DataFrame([[str(2 ** 63), 1], ["", 2]])),
(str(2 ** 63) + "\n1", {"na_values": [2 ** 63]}, DataFrame([np.nan, 1])),
],
)
def test_na_values_uint64(all_parsers, data, kwargs, expected):
# see gh-14983
parser = all_parsers
result = parser.read_csv(StringIO(data), header=None, **kwargs)
tm.assert_frame_equal(result, expected)
def test_empty_na_values_no_default_with_index(all_parsers):
# see gh-15835
data = "a,1\nb,2"
parser = all_parsers
expected = DataFrame({"1": [2]}, index=Index(["b"], name="a"))
result = parser.read_csv(StringIO(data), index_col=0, keep_default_na=False)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"na_filter,index_data", [(False, ["", "5"]), (True, [np.nan, 5.0])]
)
def test_no_na_filter_on_index(all_parsers, na_filter, index_data):
# see gh-5239
#
# Don't parse NA-values in index unless na_filter=True
parser = all_parsers
data = "a,b,c\n1,,3\n4,5,6"
expected = DataFrame({"a": [1, 4], "c": [3, 6]}, index=Index(index_data, name="b"))
result = parser.read_csv(StringIO(data), index_col=[1], na_filter=na_filter)
tm.assert_frame_equal(result, expected)
def test_inf_na_values_with_int_index(all_parsers):
# see gh-17128
parser = all_parsers
data = "idx,col1,col2\n1,3,4\n2,inf,-inf"
# Don't fail with OverflowError with inf's and integer index column.
out = parser.read_csv(StringIO(data), index_col=[0], na_values=["inf", "-inf"])
expected = DataFrame(
{"col1": [3, np.nan], "col2": [4, np.nan]}, index=Index([1, 2], name="idx")
)
tm.assert_frame_equal(out, expected)
@pytest.mark.parametrize("na_filter", [True, False])
def test_na_values_with_dtype_str_and_na_filter(all_parsers, na_filter):
# see gh-20377
parser = all_parsers
data = "a,b,c\n1,,3\n4,5,6"
# na_filter=True --> missing value becomes NaN.
# na_filter=False --> missing value remains empty string.
empty = np.nan if na_filter else ""
expected = DataFrame({"a": ["1", "4"], "b": [empty, "5"], "c": ["3", "6"]})
result = parser.read_csv(StringIO(data), na_filter=na_filter, dtype=str)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data, na_values",
[
("false,1\n,1\ntrue", None),
("false,1\nnull,1\ntrue", None),
("false,1\nnan,1\ntrue", None),
("false,1\nfoo,1\ntrue", "foo"),
("false,1\nfoo,1\ntrue", ["foo"]),
("false,1\nfoo,1\ntrue", {"a": "foo"}),
],
)
def test_cast_NA_to_bool_raises_error(all_parsers, data, na_values):
parser = all_parsers
msg = (
"(Bool column has NA values in column [0a])|"
"(cannot safely convert passed user dtype of "
"bool for object dtyped data in column 0)"
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(
StringIO(data),
header=None,
names=["a", "b"],
dtype={"a": "bool"},
na_values=na_values,
)
def test_str_nan_dropped(all_parsers):
# see gh-21131
parser = all_parsers
data = """File: small.csv,,
10010010233,0123,654
foo,,bar
01001000155,4530,898"""
result = parser.read_csv(
StringIO(data),
header=None,
names=["col1", "col2", "col3"],
dtype={"col1": str, "col2": str, "col3": str},
).dropna()
expected = DataFrame(
{
"col1": ["10010010233", "01001000155"],
"col2": ["0123", "4530"],
"col3": ["654", "898"],
},
index=[1, 3],
)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
Clyde-fare/scikit-learn | sklearn/preprocessing/label.py | 137 | 27165 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
    multi-class labels to binary labels (belongs or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
        The output of transform is sometimes referred to as the 1-of-K
        coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this makes it possible to use the output
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
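        # Looking up a label that is not yet in the mapping inserts it with
        # the next free column index (the current length of the dict).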
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
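        # Build the CSR components row by row: `indices` collects the column
        # ids of the 1-entries, while `indptr` records the cumulative number
        # of entries seen after each row.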
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| bsd-3-clause |
whn09/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py | 62 | 3753 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
def setup_test_df():
"""Create a dataframe populated with some test columns."""
df = learn.DataFrame()
df["a"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
df["b"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
df["c"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
return df
class DataFrameTest(test.TestCase):
"""Test of `DataFrame`."""
def test_create(self):
df = setup_test_df()
self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))
def test_select_columns(self):
df = setup_test_df()
df2 = df.select_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["a", "c"]))
def test_exclude_columns(self):
df = setup_test_df()
df2 = df.exclude_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["b"]))
def test_get_item(self):
df = setup_test_df()
c1 = df["b"]
self.assertEqual(
mocks.MockTensor("Mock Tensor 2", dtypes.int32), c1.build())
def test_del_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
del df["b"]
self.assertEqual(2, len(df))
self.assertEqual(df.columns(), frozenset(["a", "c"]))
def test_set_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn",
mocks.MockTensor("Tensor ", dtypes.int32))
df["quack"] = col1
self.assertEqual(4, len(df))
col2 = df["quack"]
self.assertEqual(col1, col2)
def test_set_item_column_multi(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn", [])
col2 = mocks.MockSeries("MooColumn", [])
df["quack", "moo"] = [col1, col2]
self.assertEqual(5, len(df))
col3 = df["quack"]
self.assertEqual(col1, col3)
col4 = df["moo"]
self.assertEqual(col2, col4)
def test_set_item_pandas(self):
# TODO(jamieas)
pass
def test_set_item_numpy(self):
# TODO(jamieas)
pass
def test_build(self):
df = setup_test_df()
result = df.build()
expected = {
"a": mocks.MockTensor("Mock Tensor 1", dtypes.int32),
"b": mocks.MockTensor("Mock Tensor 2", dtypes.int32),
"c": mocks.MockTensor("Mock Tensor 1", dtypes.int32)
}
self.assertEqual(expected, result)
if __name__ == "__main__":
test.main()
| apache-2.0 |
KellyChan/python-examples | python/sklearn/examples/general/pipelining_chaining_a_PCA_and_a_logistic_regression.py | 3 | 1985 | #---------------------------------------------------------------#
# Project: Pipelining: chaining a PCA and a logistic regression
# Author: Kelly Chan
# Date: Apr 23 2014
#---------------------------------------------------------------#
print(__doc__)
import numpy as np
import pylab as pl
from sklearn import linear_model, decomposition, datasets
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
def createLogistic():
logistic = linear_model.LogisticRegression()
return logistic
def createPCA():
pca = decomposition.PCA()
return pca
def createPipeline(logistic, pca):
pipe = Pipeline(steps=[('pca', pca), \
('logistic', logistic)])
return pipe
def loadData():
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
return X_digits, y_digits
def predict(pipe, X_digits, y_digits):
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
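    # GridSearchCV addresses pipeline parameters as <step name>__<parameter>,
    # e.g. pca__n_components tunes n_components of the 'pca' step.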
estimator = GridSearchCV(pipe, \
dict(pca__n_components=n_components, \
logistic__C=Cs))
y_pred = estimator.fit(X_digits, y_digits)
return estimator, y_pred
def plotPCA(estimator, pca, X_digits):
pca.fit(X_digits)
pl.figure(1, figsize=(4, 3))
pl.clf()
pl.axes([.2, .2, .7, .7])
pl.plot(pca.explained_variance_, linewidth=2)
pl.axis('tight')
pl.xlabel('n_components')
pl.ylabel('explained_variance_')
pl.axvline(estimator.best_estimator_.named_steps['pca'].n_components, \
linestyle=':', \
label='n_components chosen')
pl.legend(prop=dict(size=12))
pl.show()
def test():
logistic = createLogistic()
pca = createPCA()
pipe = createPipeline(logistic, pca)
X_digits, y_digits = loadData()
estimator, y_pred = predict(pipe, X_digits, y_digits)
plotPCA(estimator, pca, X_digits)
if __name__ == '__main__':
test()
| mit |
lyft/incubator-airflow | scripts/perf/sql_queries.py | 2 | 5361 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import statistics
from time import monotonic, sleep
from typing import List, NamedTuple, Optional, Tuple
import pandas as pd
# Setup environment before any Airflow import
DAG_FOLDER = os.path.join(os.path.dirname(__file__), "dags")
os.environ["AIRFLOW__CORE__DAGS_FOLDER"] = DAG_FOLDER
os.environ["AIRFLOW__DEBUG__SQLALCHEMY_STATS"] = "True"
os.environ["AIRFLOW__CORE__LOAD_EXAMPLES"] = "False"
# Here we setup simpler logger to avoid any code changes in
# Airflow core code base
LOG_LEVEL = "INFO"
LOG_FILE = "/files/sql_stats.log" # Default to run in Breeze
os.environ[
"AIRFLOW__LOGGING__LOGGING_CONFIG_CLASS"
] = "scripts.perf.sql_queries.DEBUG_LOGGING_CONFIG"
DEBUG_LOGGING_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {"airflow": {"format": "%(message)s"}},
"handlers": {
"console": {"class": "logging.StreamHandler"},
"task": {
"class": "logging.FileHandler",
"formatter": "airflow",
"filename": LOG_FILE,
},
"processor": {
"class": "logging.FileHandler",
"formatter": "airflow",
"filename": LOG_FILE,
},
},
"loggers": {
"airflow.processor": {
"handlers": ["processor"],
"level": LOG_LEVEL,
"propagate": False,
},
"airflow.task": {"handlers": ["task"], "level": LOG_LEVEL, "propagate": False},
"flask_appbuilder": {
"handler": ["console"],
"level": LOG_LEVEL,
"propagate": True,
},
},
"root": {"handlers": ["console", "task"], "level": LOG_LEVEL},
}
class Query(NamedTuple):
function: str
file: str
location: int
sql: str
stack: str
time: float
def __str__(self):
        sql = self.sql if len(self.sql) < 110 else f"{self.sql[:110]}..."
return f"{self.function} in {self.file}:{self.location}: {sql}"
def __eq__(self, other):
return (
self.function == other.function
and self.sql == other.sql
and self.location == other.location
and self.file == other.file
)
def to_dict(self):
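        # A NamedTuple iterates over its fields in order, so zipping with the
        # field names rebuilds the record as a plain dict.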
return dict(zip(("function", "file", "location", "sql", "stack", "time"), self))
def reset_db():
from airflow.utils.db import resetdb
resetdb()
def run_scheduler_job(with_db_reset=False) -> None:
from airflow.jobs.scheduler_job import SchedulerJob
if with_db_reset:
reset_db()
SchedulerJob(subdir=DAG_FOLDER, do_pickle=False, num_runs=3).run()
def is_query(line: str) -> bool:
return "@SQLALCHEMY" in line and "|$" in line
def make_report() -> List[Query]:
queries = []
with open(LOG_FILE, "r+") as f:
raw_queries = [line for line in f.readlines() if is_query(line)]
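    # Each matching log line has the form:
    # "@SQLALCHEMY <time>|$<function>:<file>:<line>|$<stack>|$<sql>"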
for query in raw_queries:
t, info, stack, sql = query.replace("@SQLALCHEMY ", "").split("|$")
func, file, loc = info.split(":")
file_name = file.rpartition("/")[-1] if "/" in file else file
queries.append(
Query(
function=func.strip(),
file=file_name.strip(),
location=int(loc.strip()),
sql=sql.strip(),
stack=stack.strip(),
time=float(t.strip()),
)
)
return queries
def run_test() -> Tuple[List[Query], float]:
if os.path.exists(LOG_FILE):
os.remove(LOG_FILE)
tic = monotonic()
run_scheduler_job(with_db_reset=False)
toc = monotonic()
queries = make_report()
return queries, toc - tic
def rows_to_csv(rows: List[dict], name: Optional[str] = None) -> pd.DataFrame:
df = pd.DataFrame(rows)
name = name or f"/files/sql_stats_{int(monotonic())}.csv"
df.to_csv(name, index=False)
print(f"Saved result to {name}")
return df
def main() -> None:
reset_db()
rows = []
times = []
for i in range(4):
sleep(5)
queries, exec_time = run_test()
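        # Treat the first iteration as a warm-up run and drop its results.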
if i == 0:
continue
times.append(exec_time)
for qry in queries:
info = qry.to_dict()
info["test_no"] = i # type: ignore
rows.append(info)
rows_to_csv(rows, name="/files/sql_after_remote.csv")
print(times)
msg = "Time for %d dag runs: %.4fs"
if len(times) > 1:
print((msg + " (±%.3fs)") % (len(times), statistics.mean(times), statistics.stdev(times)))
else:
print(msg % (len(times), times[0]))
if __name__ == "__main__":
main()
| apache-2.0 |
amybingzhao/basic-rr-monitor | test/alarm_response_time_test.py | 1 | 5763 | import peakutils
import RPi.GPIO as GPIO
import time
import Adafruit_ADS1x15
from collections import deque
import matplotlib
matplotlib.use("TkAgg")  # select the Tk backend before pyplot is imported
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import matplotlib.animation as animation
from matplotlib import style
import Tkinter as tk
import numpy as np
import threading
SCREEN_WIDTH = 320
SCREEN_HEIGHT = 240
global start_time
start_time = 0
def power_on_sound():
"""
    Beep the piezo buzzer three times to indicate start-up.
:return:
"""
for i in range(3):
GPIO.output(PIN, True)
time.sleep(0.1)
GPIO.output(PIN, False)
time.sleep(0.1)
def update_lcd():
"""
    Redraw the respiration waveform plot on the display.
"""
a.clear()
a.plot(TIMES, WINDOW)
a.axis('off')
def sound_alarm():
"""
Sound the piezo buzzer.
:return:
"""
for i in range(10):
GPIO.output(PIN, True)
time.sleep(0.5)
GPIO.output(PIN, False)
time.sleep(0.1)
def check_alarm_conditions(RR):
"""
Check that respiratory rate is within acceptable limits
    :return: error message if outside acceptable limits, empty string otherwise.
"""
global ALARM_TRIGGER_COUNTER, start_time
message = ""
if float(RR) < LL:
message = "Respiration rate is too low!"
elif float(RR) > UL:
message = "Respiration rate is too high!"
if message != "":
if ALARM_TRIGGER_COUNTER == 0:
start_time = time.time()
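        # Debounce: only alarm after 20 consecutive out-of-range readings; the
        # elapsed time printed below is the measured alarm response time.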
if threading.active_count() < 2 and ALARM_TRIGGER_COUNTER >= 20:
print(time.time() - start_time)
thread = AlarmThread()
thread.start()
ALARM_TRIGGER_COUNTER = ALARM_TRIGGER_COUNTER + 1
else:
ALARM_TRIGGER_COUNTER = 0
return message
def sample_data():
"""
Samples from the ADC and appends the current value to the window of data.
"""
val = adc.read_adc_difference(ADC_IN, gain=GAIN)
WINDOW.append(val)
TIMES.append(time.time() - START_TIME)
def calc_rr():
"""
Uses most recent 10 seconds of data to calculate average RR
"""
peaks = peakutils.peak.indexes(WINDOW)
try:
beats_per_second = len(peaks) / (TIMES[len(TIMES) - 1] - TIMES[0])
RR = str(beats_per_second * SECONDS_PER_MINUTE)
except ZeroDivisionError:
RR = str(-1)
return RR
def main(i):
    # Animation callback: invoked once per frame by FuncAnimation (see below).
global RR
sample_data()
message = ""
if len(WINDOW) == WINDOW_SIZE:
RR = calc_rr()
message = check_alarm_conditions(RR)
update_lcd()
app.frame.update_labels(RR, message)
# time.sleep(DELAY)
class AlarmThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
sound_alarm()
class GUI(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
tk.Tk.wm_title(self, "RR Monitor")
container = tk.Frame(self, width=SCREEN_WIDTH, height=SCREEN_HEIGHT)
container.pack(side="top", fill="both", expand=True)
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
        w = SCREEN_WIDTH
h = SCREEN_HEIGHT
x = 0
y = 0
self.geometry('%dx%d+%d+%d' % (w, h, x, y))
self.frames = {}
self.frame = Graph(container, self)
self.frames[Graph] = self.frame
self.frame.grid(row=0, column=0, sticky="nsew")
self.show_frame(Graph)
def show_frame(self, cont):
self.frame = self.frames[cont]
self.frame.tkraise()
class Graph(tk.Frame):
def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent, width=SCREEN_WIDTH, height=SCREEN_HEIGHT)
label = tk.Label(self, text="Infant Respiration Monitor", font=LARGE_FONT)
label.pack(pady=10, padx=10)
self.rr = tk.StringVar()
self.rr.set("Respiration Rate: ")
rr_label = tk.Label(self, textvariable=self.rr, font=LARGE_FONT)
rr_label.pack(pady=10, padx=10)
canvas = FigureCanvasTkAgg(f, self)
canvas.show()
canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
def update_labels(self, RR, message):
if RR == "Not enough data yet.":
self.rr.set("Respiration Rate: Not enough data yet.")
elif message == "":
self.rr.set("Respiration Rate: " + str(round(float(RR), 2)))
else:
self.rr.set(message + "(RR = " + str(round(float(RR), 2)) + ")")
if __name__ == "__main__":
LARGE_FONT = ("Verdana", 12)
style.use("ggplot")
f = Figure(figsize=(3, 1.5), dpi=100)
a = f.add_subplot(111)
# initialize ADC
adc = Adafruit_ADS1x15.ADS1115()
GAIN = 1
ADC_IN = 0
# initialize piezo
PIN = 18 # Use GPIO pin 18 as output
GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN, GPIO.OUT)
# alarm conditions
LL = 10 # lower limit: 10 breaths per minute
UL = 70 # upper limit: 70 breaths per minute
RR = 'Not enough data yet.'
START_TIME = time.time()
SECONDS_PER_MINUTE = 60
    FS = 1000  # Sample at 1000 Hz
DELAY = float(1) / FS
WINDOW_DURATION = 10 # Determine RR from a 10-second window
# WINDOW_SIZE = int(WINDOW_DURATION / DELAY)
WINDOW_SIZE = 30
global ALARM_TRIGGER_COUNTER
ALARM_TRIGGER_COUNTER = 0
WINDOW = deque([], WINDOW_SIZE)
TIMES = deque([], WINDOW_SIZE)
app = GUI()
power_on_sound()
ani = animation.FuncAnimation(f, main, interval=1)
app.mainloop()
| mit |
wesm/arrow | python/pyarrow/filesystem.py | 4 | 14468 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import posixpath
import sys
import urllib.parse
import warnings
from os.path import join as pjoin
import pyarrow as pa
from pyarrow.util import implements, _stringify_path, _is_path_like, _DEPR_MSG
_FS_DEPR_MSG = _DEPR_MSG.format(
"filesystem.LocalFileSystem", "2.0.0", "fs.LocalFileSystem"
)
class FileSystem:
"""
Abstract filesystem interface.
"""
def cat(self, path):
"""
Return contents of file as a bytes object.
Parameters
----------
path : str
File path to read content from.
Returns
-------
contents : bytes
"""
with self.open(path, 'rb') as f:
return f.read()
def ls(self, path):
"""
Return list of file paths.
Parameters
----------
path : str
Directory to list contents from.
"""
raise NotImplementedError
def delete(self, path, recursive=False):
"""
Delete the indicated file or directory.
Parameters
----------
path : str
Path to delete.
recursive : bool, default False
If True, also delete child paths for directories.
"""
raise NotImplementedError
def disk_usage(self, path):
"""
Compute bytes used by all contents under indicated path in file tree.
Parameters
----------
path : str
Can be a file path or directory.
Returns
-------
usage : int
"""
path = _stringify_path(path)
path_info = self.stat(path)
if path_info['kind'] == 'file':
return path_info['size']
total = 0
for root, directories, files in self.walk(path):
for child_path in files:
abspath = self._path_join(root, child_path)
total += self.stat(abspath)['size']
return total
def _path_join(self, *args):
return self.pathsep.join(args)
def stat(self, path):
"""
Information about a filesystem entry.
Returns
-------
stat : dict
"""
raise NotImplementedError('FileSystem.stat')
def rm(self, path, recursive=False):
"""
Alias for FileSystem.delete.
"""
return self.delete(path, recursive=recursive)
def mv(self, path, new_path):
"""
Alias for FileSystem.rename.
"""
return self.rename(path, new_path)
def rename(self, path, new_path):
"""
Rename file, like UNIX mv command.
Parameters
----------
path : str
Path to alter.
new_path : str
Path to move to.
"""
raise NotImplementedError('FileSystem.rename')
def mkdir(self, path, create_parents=True):
"""
Create a directory.
Parameters
----------
path : str
Path to the directory.
create_parents : bool, default True
If the parent directories don't exists create them as well.
"""
raise NotImplementedError
def exists(self, path):
"""
Return True if path exists.
Parameters
----------
path : str
Path to check.
"""
raise NotImplementedError
def isdir(self, path):
"""
Return True if path is a directory.
Parameters
----------
path : str
Path to check.
"""
raise NotImplementedError
def isfile(self, path):
"""
Return True if path is a file.
Parameters
----------
path : str
Path to check.
"""
raise NotImplementedError
def _isfilestore(self):
"""
Returns True if this FileSystem is a unix-style file store with
directories.
"""
raise NotImplementedError
def read_parquet(self, path, columns=None, metadata=None, schema=None,
use_threads=True, use_pandas_metadata=False):
"""
Read Parquet data from path in file system. Can read from a single file
or a directory of files.
Parameters
----------
path : str
Single file path or directory
columns : List[str], optional
Subset of columns to read.
metadata : pyarrow.parquet.FileMetaData
Known metadata to validate files against.
schema : pyarrow.parquet.Schema
Known schema to validate files against. Alternative to metadata
argument.
use_threads : bool, default True
Perform multi-threaded column reads.
use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.
Returns
-------
table : pyarrow.Table
"""
from pyarrow.parquet import ParquetDataset
dataset = ParquetDataset(path, schema=schema, metadata=metadata,
filesystem=self)
return dataset.read(columns=columns, use_threads=use_threads,
use_pandas_metadata=use_pandas_metadata)
def open(self, path, mode='rb'):
"""
Open file for reading or writing.
"""
raise NotImplementedError
@property
def pathsep(self):
return '/'
class LocalFileSystem(FileSystem):
_instance = None
def __init__(self):
warnings.warn(_FS_DEPR_MSG, FutureWarning, stacklevel=2)
super().__init__()
@classmethod
def _get_instance(cls):
if cls._instance is None:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
cls._instance = LocalFileSystem()
return cls._instance
@classmethod
def get_instance(cls):
warnings.warn(_FS_DEPR_MSG, FutureWarning, stacklevel=2)
return cls._get_instance()
@implements(FileSystem.ls)
def ls(self, path):
path = _stringify_path(path)
return sorted(pjoin(path, x) for x in os.listdir(path))
@implements(FileSystem.mkdir)
def mkdir(self, path, create_parents=True):
path = _stringify_path(path)
if create_parents:
os.makedirs(path)
else:
os.mkdir(path)
@implements(FileSystem.isdir)
def isdir(self, path):
path = _stringify_path(path)
return os.path.isdir(path)
@implements(FileSystem.isfile)
def isfile(self, path):
path = _stringify_path(path)
return os.path.isfile(path)
@implements(FileSystem._isfilestore)
def _isfilestore(self):
return True
@implements(FileSystem.exists)
def exists(self, path):
path = _stringify_path(path)
return os.path.exists(path)
@implements(FileSystem.open)
def open(self, path, mode='rb'):
"""
Open file for reading or writing.
"""
path = _stringify_path(path)
return open(path, mode=mode)
@property
def pathsep(self):
return os.path.sep
def walk(self, path):
"""
Directory tree generator, see os.walk.
"""
path = _stringify_path(path)
return os.walk(path)
class DaskFileSystem(FileSystem):
"""
    Wraps Dask filesystem implementations such as s3fs, gcsfs, etc.
"""
def __init__(self, fs):
warnings.warn(
"The pyarrow.filesystem.DaskFileSystem/S3FSWrapper are deprecated "
"as of pyarrow 3.0.0, and will be removed in a future version.",
FutureWarning, stacklevel=2)
self.fs = fs
@implements(FileSystem.isdir)
def isdir(self, path):
raise NotImplementedError("Unsupported file system API")
@implements(FileSystem.isfile)
def isfile(self, path):
raise NotImplementedError("Unsupported file system API")
@implements(FileSystem._isfilestore)
def _isfilestore(self):
"""
Object Stores like S3 and GCSFS are based on key lookups, not true
file-paths.
"""
return False
@implements(FileSystem.delete)
def delete(self, path, recursive=False):
path = _stringify_path(path)
return self.fs.rm(path, recursive=recursive)
@implements(FileSystem.exists)
def exists(self, path):
path = _stringify_path(path)
return self.fs.exists(path)
@implements(FileSystem.mkdir)
def mkdir(self, path, create_parents=True):
path = _stringify_path(path)
if create_parents:
return self.fs.mkdirs(path)
else:
return self.fs.mkdir(path)
@implements(FileSystem.open)
def open(self, path, mode='rb'):
"""
Open file for reading or writing.
"""
path = _stringify_path(path)
return self.fs.open(path, mode=mode)
def ls(self, path, detail=False):
path = _stringify_path(path)
return self.fs.ls(path, detail=detail)
def walk(self, path):
"""
Directory tree generator, like os.walk.
"""
path = _stringify_path(path)
return self.fs.walk(path)
class S3FSWrapper(DaskFileSystem):
@implements(FileSystem.isdir)
def isdir(self, path):
path = _sanitize_s3(_stringify_path(path))
try:
contents = self.fs.ls(path)
if len(contents) == 1 and contents[0] == path:
return False
else:
return True
except OSError:
return False
@implements(FileSystem.isfile)
def isfile(self, path):
path = _sanitize_s3(_stringify_path(path))
try:
contents = self.fs.ls(path)
return len(contents) == 1 and contents[0] == path
except OSError:
return False
def walk(self, path, refresh=False):
"""
Directory tree generator, like os.walk.
Generator version of what is in s3fs, which yields a flattened list of
files.
"""
path = _sanitize_s3(_stringify_path(path))
directories = set()
files = set()
for key in list(self.fs._ls(path, refresh=refresh)):
path = key['Key']
if key['StorageClass'] == 'DIRECTORY':
directories.add(path)
elif key['StorageClass'] == 'BUCKET':
pass
else:
files.add(path)
# s3fs creates duplicate 'DIRECTORY' entries
files = sorted([posixpath.split(f)[1] for f in files
if f not in directories])
directories = sorted([posixpath.split(x)[1]
for x in directories])
yield path, directories, files
for directory in directories:
yield from self.walk(directory, refresh=refresh)
def _sanitize_s3(path):
if path.startswith('s3://'):
return path.replace('s3://', '')
else:
return path
def _ensure_filesystem(fs):
fs_type = type(fs)
# If the arrow filesystem was subclassed, assume it supports the full
# interface and return it
if not issubclass(fs_type, FileSystem):
if "fsspec" in sys.modules:
fsspec = sys.modules["fsspec"]
if isinstance(fs, fsspec.AbstractFileSystem):
# for recent fsspec versions that stop inheriting from
# pyarrow.filesystem.FileSystem, still allow fsspec
# filesystems (which should be compatible with our legacy fs)
return fs
raise OSError('Unrecognized filesystem: {}'.format(fs_type))
else:
return fs
def resolve_filesystem_and_path(where, filesystem=None):
"""
Return filesystem from path which could be an HDFS URI, a local URI,
or a plain filesystem path.
"""
if not _is_path_like(where):
if filesystem is not None:
raise ValueError("filesystem passed but where is file-like, so"
" there is nothing to open with filesystem.")
return filesystem, where
if filesystem is not None:
filesystem = _ensure_filesystem(filesystem)
if isinstance(filesystem, LocalFileSystem):
path = _stringify_path(where)
elif not isinstance(where, str):
raise TypeError(
"Expected string path; path-like objects are only allowed "
"with a local filesystem"
)
else:
path = where
return filesystem, path
path = _stringify_path(where)
parsed_uri = urllib.parse.urlparse(path)
if parsed_uri.scheme == 'hdfs' or parsed_uri.scheme == 'viewfs':
# Input is hdfs URI such as hdfs://host:port/myfile.parquet
netloc_split = parsed_uri.netloc.split(':')
host = netloc_split[0]
if host == '':
host = 'default'
else:
host = parsed_uri.scheme + "://" + host
port = 0
if len(netloc_split) == 2 and netloc_split[1].isnumeric():
port = int(netloc_split[1])
fs = pa.hdfs._connect(host=host, port=port)
fs_path = parsed_uri.path
elif parsed_uri.scheme == 'file':
# Input is local URI such as file:///home/user/myfile.parquet
fs = LocalFileSystem._get_instance()
fs_path = parsed_uri.path
else:
# Input is local path such as /home/user/myfile.parquet
fs = LocalFileSystem._get_instance()
fs_path = path
return fs, fs_path
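# A minimal usage sketch (illustrative only; the paths and host below are
# made up):
#
#   fs, path = resolve_filesystem_and_path("hdfs://namenode:8020/data/x.parquet")
#   # -> (HDFS filesystem connected to namenode:8020, "/data/x.parquet")
#
#   fs, path = resolve_filesystem_and_path("/home/user/x.parquet")
#   # -> (LocalFileSystem instance, "/home/user/x.parquet")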
| apache-2.0 |
ankurankan/scikit-learn | sklearn/covariance/tests/test_covariance.py | 28 | 10115 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
"""Tests Covariance module on a simple dataset.
"""
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
"""Tests ShrunkCovariance module on a simple dataset.
"""
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
"""Tests LedoitWolf module on a simple dataset.
"""
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_centered,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# (too) large data set
X_large = np.ones((20, 200))
assert_raises(MemoryError, ledoit_wolf, X_large, block_size=100)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_oas():
"""Tests OAS module on a simple dataset.
"""
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered,
                                                 assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
    # Same tests without assuming centered data
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
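# --- Hedged numeric sketch (editorial addition): shrunk_covariance blends
# the empirical covariance with a scaled identity,
#     shrunk = (1 - a) * emp_cov + a * (trace(emp_cov) / n_features) * I,
# verified below on a small random sample. Names local to this sketch
# (rng, sample, emp, a) are illustrative only.
def _sketch_shrinkage_formula():
    rng = np.random.RandomState(0)
    sample = rng.randn(50, 3)
    emp = empirical_covariance(sample)
    a = 0.4
    expected = (1 - a) * emp + a * np.trace(emp) / 3. * np.eye(3)
    assert_array_almost_equal(shrunk_covariance(emp, shrinkage=a), expected)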
| bsd-3-clause |
abhishekgahlot/scikit-learn | sklearn/utils/tests/test_validation.py | 12 | 7588 | """Tests for input validation functions"""
from tempfile import NamedTemporaryFile
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from itertools import product
from sklearn.utils import as_float_array, check_array
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.utils.validation import has_fit_parameter
def test_as_float_array():
"""Test function for as_float_array"""
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
"""Confirm that input validation code does not return np.matrix"""
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
"""Confirm that input validation code doesn't copy memory mapped arrays"""
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=100, dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M)
X[:] = 0
def test_ordering():
"""Check that ordering is enforced correctly by validation utilities.
We need to check each validation utility, because a 'copy' without
'order=K' will kill the ordering.
"""
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
for copy in (True, False):
Y = check_array(X, accept_sparse='csr', copy=copy, order='C')
assert_true(Y.data.flags['C_CONTIGUOUS'])
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
X_checked = check_array(X, dtype=dtype, accept_sparse=accept_sparse,
copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
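# --- Hedged usage sketch (editorial addition): the behaviours asserted
# above in miniature -- check_array promotes a nested list to a 2-D float
# array and can force dtype and memory layout in a single call.
def _sketch_check_array_usage():
    arr = check_array([[1, 2], [3, 4]], dtype=np.float64, order='F')
    assert arr.ndim == 2 and arr.dtype == np.float64
    assert arr.flags['F_CONTIGUOUS']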
| bsd-3-clause |
abimannans/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
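# --- Hedged sketch (editorial addition): the scaling rule the figures
# illustrate, written out explicitly. For the l1 penalty the grid of
# candidate C values is multiplied by the number of training samples, so
# the penalty keeps pace with the growing data-fit sum; for l2 the grid is
# left unscaled. The helper name is illustrative only.
def scale_C_grid(cs, n_train, penalty='l1'):
    return cs * float(n_train) if penalty == 'l1' else cs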
| bsd-3-clause |
MechCoder/scikit-learn | sklearn/linear_model/base.py | 2 | 20724 | """
Generalized Linear models.
"""
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Vincent Michel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# Maryan Morel <[email protected]>
# Giorgio Patrini <[email protected]>
# License: BSD 3 clause
from __future__ import division
from abc import ABCMeta, abstractmethod
import numbers
import warnings
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from scipy import sparse
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..utils import check_array, check_X_y, deprecated, as_float_array
from ..utils.validation import FLOAT_DTYPES
from ..utils import check_random_state
from ..utils.extmath import safe_sparse_dot
from ..utils.sparsefuncs import mean_variance_axis, inplace_column_scale
from ..utils.fixes import sparse_lsqr
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from ..preprocessing.data import normalize as f_normalize
# TODO: bayesian_ridge_regression and bayesian_regression_ard
# should be squashed into its respective objects.
SPARSE_INTERCEPT_DECAY = 0.01
# For sparse data intercept updates are scaled by this decay factor to avoid
# intercept oscillation.
def make_dataset(X, y, sample_weight, random_state=None):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
rng = check_random_state(random_state)
# seed should never be 0 in SequentialDataset
seed = rng.randint(1, np.iinfo(np.int32).max)
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y, sample_weight,
seed=seed)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y, sample_weight, seed=seed)
intercept_decay = 1.0
return dataset, intercept_decay
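# --- Hedged sketch (editorial addition): the dense/sparse dispatch above
# in action -- dense input yields an ArrayDataset with no intercept decay,
# sparse input a CSRDataset with the damped decay constant.
def _sketch_make_dataset_dispatch():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(5, 2)
    y_demo = rng.randn(5)
    w_demo = np.ones(5)
    _, decay_dense = make_dataset(X_demo, y_demo, w_demo, random_state=0)
    _, decay_sparse = make_dataset(sp.csr_matrix(X_demo), y_demo, w_demo,
                                   random_state=0)
    assert decay_dense == 1.0
    assert decay_sparse == SPARSE_INTERCEPT_DECAY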
@deprecated("sparse_center_data was deprecated in version 0.18 and will be "
"removed in 0.20. Use utilities in preprocessing.data instead")
def sparse_center_data(X, y, fit_intercept, normalize=False):
"""
Compute information needed to center data to have mean zero along
axis 0. Be aware that X will not be centered since it would break
    the sparsity, but will be normalized if requested.
"""
if fit_intercept:
# we might require not to change the csr matrix sometimes
# store a copy if normalize is True.
# Change dtype to float64 since mean_variance_axis accepts
# it that way.
if sp.isspmatrix(X) and X.getformat() == 'csr':
X = sp.csr_matrix(X, copy=normalize, dtype=np.float64)
else:
X = sp.csc_matrix(X, copy=normalize, dtype=np.float64)
X_offset, X_var = mean_variance_axis(X, axis=0)
if normalize:
# transform variance to std in-place
X_var *= X.shape[0]
X_std = np.sqrt(X_var, X_var)
del X_var
X_std[X_std == 0] = 1
inplace_column_scale(X, 1. / X_std)
else:
X_std = np.ones(X.shape[1])
y_offset = y.mean(axis=0)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_offset, y_offset, X_std
@deprecated("center_data was deprecated in version 0.18 and will be removed in "
"0.20. Use utilities in preprocessing.data instead")
def center_data(X, y, fit_intercept, normalize=False, copy=True,
sample_weight=None):
"""
Centers data to have mean zero along axis 0. This is here because
nearly all linear models will want their data to be centered.
If sample_weight is not None, then the weighted mean of X and y
is zero, and not the mean itself
"""
X = as_float_array(X, copy)
if fit_intercept:
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sp.issparse(X):
X_offset = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
else:
X_offset = np.average(X, axis=0, weights=sample_weight)
X -= X_offset
# XXX: currently scaled to variance=n_samples
if normalize:
X_std = np.sqrt(np.sum(X ** 2, axis=0))
X_std[X_std == 0] = 1
X /= X_std
else:
X_std = np.ones(X.shape[1])
y_offset = np.average(y, axis=0, weights=sample_weight)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_offset, y_offset, X_std
def _preprocess_data(X, y, fit_intercept, normalize=False, copy=True,
sample_weight=None, return_mean=False):
"""
    Centers data to have mean zero along axis 0. If fit_intercept=False or if
    X is a sparse matrix, no centering is done, but normalization can still
    be applied. The function returns the statistics necessary to reconstruct
    the input data, which are X_offset, y_offset, X_scale, such that the output
        X = (X - X_offset) / X_scale
    X_scale is the L2 norm of X - X_offset. If sample_weight is not None,
    then the weighted mean of X and y is zero, and not the mean itself. If
    return_mean=True, the mean, possibly weighted, is returned, independently
    of whether X was centered (option used for optimization with sparse data in
    coordinate_descent).
This is here because nearly all linear models will want their data to be
centered.
"""
if isinstance(sample_weight, numbers.Number):
sample_weight = None
X = check_array(X, copy=copy, accept_sparse=['csr', 'csc'],
dtype=FLOAT_DTYPES)
if fit_intercept:
if sp.issparse(X):
X_offset, X_var = mean_variance_axis(X, axis=0)
if not return_mean:
X_offset[:] = 0
if normalize:
# TODO: f_normalize could be used here as well but the function
# inplace_csr_row_normalize_l2 must be changed such that it
# can return also the norms computed internally
# transform variance to norm in-place
X_var *= X.shape[0]
X_scale = np.sqrt(X_var, X_var)
del X_var
X_scale[X_scale == 0] = 1
inplace_column_scale(X, 1. / X_scale)
else:
X_scale = np.ones(X.shape[1], dtype=X.dtype)
else:
X_offset = np.average(X, axis=0, weights=sample_weight)
X -= X_offset
if normalize:
X, X_scale = f_normalize(X, axis=0, copy=False,
return_norm=True)
else:
X_scale = np.ones(X.shape[1], dtype=X.dtype)
y_offset = np.average(y, axis=0, weights=sample_weight)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1], dtype=X.dtype)
X_scale = np.ones(X.shape[1], dtype=X.dtype)
y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_offset, y_offset, X_scale
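# --- Hedged numeric sketch (editorial addition): round-trips the
# statistics documented above -- the returned X satisfies
# X_original = X_out * X_scale + X_offset when fit_intercept=True and
# normalize=True on dense input. Names local to this sketch are
# illustrative only.
def _sketch_preprocess_roundtrip():
    rng = np.random.RandomState(0)
    X_orig = rng.randn(20, 3)
    y_orig = rng.randn(20)
    X_out, y_out, X_offset, y_offset, X_scale = _preprocess_data(
        X_orig, y_orig, fit_intercept=True, normalize=True, copy=True)
    np.testing.assert_allclose(X_out * X_scale + X_offset, X_orig)
    np.testing.assert_allclose(y_out + y_offset, y_orig)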
# TODO: _rescale_data should be factored into _preprocess_data.
# Currently, the fact that sag implements its own way to deal with
# sample_weight makes the refactoring tricky.
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = sample_weight * np.ones(n_samples)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
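# --- Hedged numeric sketch (editorial addition): why sqrt(sample_weight)
# is the right rescaling -- after _rescale_data, the ordinary residual sum
# ||X.beta - y||^2 equals the weighted residual sum
# sum_i w_i * (x_i . beta - y_i)^2 of the original data.
def _sketch_rescale_equivalence():
    rng = np.random.RandomState(0)
    X_d, y_d = rng.randn(10, 2), rng.randn(10)
    w = rng.rand(10)
    beta = rng.randn(2)
    Xr, yr = _rescale_data(X_d, y_d, w)
    lhs = np.sum((Xr.dot(beta) - yr) ** 2)
    rhs = np.sum(w * (X_d.dot(beta) - y_d) ** 2)
    np.testing.assert_allclose(lhs, rhs)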
class LinearModel(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for Linear Models"""
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _decision_function(self, X):
check_is_fitted(self, "coef_")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
C : array, shape = (n_samples,)
Returns predicted values.
"""
return self._decision_function(X)
_preprocess_data = staticmethod(_preprocess_data)
def _set_intercept(self, X_offset, y_offset, X_scale):
"""Set the intercept_
"""
if self.fit_intercept:
self.coef_ = self.coef_ / X_scale
self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T)
else:
self.intercept_ = 0.
# XXX Should this derive from LinearModel? It should be a mixin, not an ABC.
# Maybe the n_features checking can be moved to LinearModel.
class LinearClassifierMixin(ClassifierMixin):
"""Mixin for linear classifiers.
Handles prediction for sparse and dense X.
"""
def decision_function(self, X):
"""Predict confidence scores for samples.
The confidence score for a sample is the signed distance of that
sample to the hyperplane.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
Confidence scores per (sample, class) combination. In the binary
case, confidence score for self.classes_[1] where >0 means this
class would be predicted.
"""
if not hasattr(self, 'coef_') or self.coef_ is None:
raise NotFittedError("This %(name)s instance is not fitted "
"yet" % {'name': type(self).__name__})
X = check_array(X, accept_sparse='csr')
n_features = self.coef_.shape[1]
if X.shape[1] != n_features:
raise ValueError("X has %d features per sample; expecting %d"
% (X.shape[1], n_features))
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel() if scores.shape[1] == 1 else scores
def predict(self, X):
"""Predict class labels for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples]
Predicted class label per sample.
"""
scores = self.decision_function(X)
if len(scores.shape) == 1:
indices = (scores > 0).astype(np.int)
else:
indices = scores.argmax(axis=1)
return self.classes_[indices]
def _predict_proba_lr(self, X):
"""Probability estimation for OvR logistic regression.
Positive class probabilities are computed as
1. / (1. + np.exp(-self.decision_function(X)));
multiclass is handled by normalizing that over all classes.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if prob.ndim == 1:
return np.vstack([1 - prob, prob]).T
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
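# --- Hedged numeric sketch (editorial addition): the in-place pipeline
# above (negate, exp, add 1, reciprocal) is just the logistic sigmoid,
# checked here against the direct formula on a toy score vector.
def _sketch_sigmoid_pipeline():
    scores = np.array([-2.0, 0.0, 3.0])
    prob = scores.copy()
    prob *= -1
    np.exp(prob, prob)
    prob += 1
    np.reciprocal(prob, prob)
    np.testing.assert_allclose(prob, 1.0 / (1.0 + np.exp(-scores)))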
class SparseCoefMixin(object):
"""Mixin for converting coef_ to and from CSR format.
L1-regularizing estimators should inherit this.
"""
def densify(self):
"""Convert coefficient matrix to dense array format.
Converts the ``coef_`` member (back) to a numpy.ndarray. This is the
default format of ``coef_`` and is required for fitting, so calling
this method is only required on models that have previously been
sparsified; otherwise, it is a no-op.
Returns
-------
self : estimator
"""
msg = "Estimator, %(name)s, must be fitted before densifying."
check_is_fitted(self, "coef_", msg=msg)
if sp.issparse(self.coef_):
self.coef_ = self.coef_.toarray()
return self
def sparsify(self):
"""Convert coefficient matrix to sparse format.
Converts the ``coef_`` member to a scipy.sparse matrix, which for
L1-regularized models can be much more memory- and storage-efficient
than the usual numpy.ndarray representation.
The ``intercept_`` member is not converted.
Notes
-----
For non-sparse models, i.e. when there are not many zeros in ``coef_``,
this may actually *increase* memory usage, so use this method with
care. A rule of thumb is that the number of zero elements, which can
be computed with ``(coef_ == 0).sum()``, must be more than 50% for this
to provide significant benefits.
After calling this method, further fitting with the partial_fit
method (if any) will not work until you call densify.
Returns
-------
self : estimator
"""
msg = "Estimator, %(name)s, must be fitted before sparsifying."
check_is_fitted(self, "coef_", msg=msg)
self.coef_ = sp.csr_matrix(self.coef_)
return self
class LinearRegression(LinearModel, RegressorMixin):
"""
Ordinary least squares Linear Regression.
Parameters
----------
fit_intercept : boolean, optional, default True
whether to calculate the intercept for this model. If set
to False, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit`` on
an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
n_jobs : int, optional, default 1
The number of jobs to use for the computation.
If -1 all CPUs are used. This will only provide speedup for
        n_targets > 1 and sufficiently large problems.
Attributes
----------
coef_ : array, shape (n_features, ) or (n_targets, n_features)
Estimated coefficients for the linear regression problem.
If multiple targets are passed during the fit (y 2D), this
is a 2D array of shape (n_targets, n_features), while if only
one target is passed, this is a 1D array of length n_features.
intercept_ : array
Independent term in the linear model.
Notes
-----
From the implementation point of view, this is just plain Ordinary
Least Squares (scipy.linalg.lstsq) wrapped as a predictor object.
"""
def __init__(self, fit_intercept=True, normalize=False, copy_X=True,
n_jobs=1):
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""
Fit linear model.
Parameters
----------
        X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data
y : numpy array of shape [n_samples, n_targets]
Target values
sample_weight : numpy array of shape [n_samples]
Individual weights for each sample
.. versionadded:: 0.17
parameter *sample_weight* support to LinearRegression.
Returns
-------
self : returns an instance of self.
"""
n_jobs_ = self.n_jobs
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
y_numeric=True, multi_output=True)
if sample_weight is not None and np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, fit_intercept=self.fit_intercept, normalize=self.normalize,
copy=self.copy_X, sample_weight=sample_weight)
if sample_weight is not None:
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
if sp.issparse(X):
if y.ndim < 2:
out = sparse_lsqr(X, y)
self.coef_ = out[0]
self._residues = out[3]
else:
                # sparse_lsqr cannot handle y with shape (M, K)
outs = Parallel(n_jobs=n_jobs_)(
delayed(sparse_lsqr)(X, y[:, j].ravel())
for j in range(y.shape[1]))
self.coef_ = np.vstack(out[0] for out in outs)
self._residues = np.vstack(out[3] for out in outs)
else:
self.coef_, self._residues, self.rank_, self.singular_ = \
linalg.lstsq(X, y)
self.coef_ = self.coef_.T
if y.ndim == 1:
self.coef_ = np.ravel(self.coef_)
self._set_intercept(X_offset, y_offset, X_scale)
return self
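# --- Hedged usage sketch (editorial addition): a minimal fit/predict
# round trip for the estimator above on a noiseless line, where the exact
# coefficients are recoverable. Data values are illustrative only.
def _sketch_linear_regression_usage():
    X_demo = np.array([[0.0], [1.0], [2.0], [3.0]])
    y_demo = 2.0 * X_demo.ravel() + 1.0
    reg = LinearRegression().fit(X_demo, y_demo)
    np.testing.assert_allclose(reg.coef_, [2.0])
    np.testing.assert_allclose(reg.intercept_, 1.0)
    np.testing.assert_allclose(reg.predict([[4.0]]), [9.0])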
def _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy):
"""Aux function used at beginning of fit in linear models"""
n_samples, n_features = X.shape
if sparse.isspmatrix(X):
precompute = False
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, fit_intercept=fit_intercept, normalize=normalize,
return_mean=True)
else:
# copy was done in fit if necessary
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, fit_intercept=fit_intercept, normalize=normalize, copy=copy)
if hasattr(precompute, '__array__') and (
fit_intercept and not np.allclose(X_offset, np.zeros(n_features)) or
normalize and not np.allclose(X_scale, np.ones(n_features))):
warnings.warn("Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
UserWarning)
# recompute Gram
precompute = 'auto'
Xy = None
# precompute if n_samples > n_features
if isinstance(precompute, six.string_types) and precompute == 'auto':
precompute = (n_samples > n_features)
if precompute is True:
# make sure that the 'precompute' array is contiguous.
precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype,
order='C')
np.dot(X.T, X, out=precompute)
if not hasattr(precompute, '__array__'):
Xy = None # cannot use Xy if precompute is not Gram
if hasattr(precompute, '__array__') and Xy is None:
common_dtype = np.find_common_type([X.dtype, y.dtype], [])
if y.ndim == 1:
# Xy is 1d, make sure it is contiguous.
Xy = np.empty(shape=n_features, dtype=common_dtype, order='C')
np.dot(X.T, y, out=Xy)
else:
# Make sure that Xy is always F contiguous even if X or y are not
# contiguous: the goal is to make it fast to extract the data for a
# specific target.
n_targets = y.shape[1]
Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype,
order='F')
np.dot(y.T, X, out=Xy.T)
return X, y, X_offset, y_offset, X_scale, precompute, Xy
| bsd-3-clause |
udacity/ggplot | ggplot/geoms/geom_boxplot.py | 12 | 1218 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
from .geom import geom
from ggplot.utils import is_string
from ggplot.utils import is_categorical
class geom_boxplot(geom):
DEFAULT_AES = {'y': None, 'color': 'black', 'flier_marker': '+'}
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity'}
def __group(self, x, y):
out = {}
for xx, yy in zip(x,y):
if yy not in out: out[yy] = []
out[yy].append(xx)
return out
def _plot_unit(self, pinfo, ax):
x = pinfo.pop('x')
y = pinfo.pop('y')
color = pinfo.pop('color')
fliermarker = pinfo.pop('flier_marker')
if y is not None:
g = self.__group(x,y)
l = sorted(g.keys())
x = [g[k] for k in l]
q = ax.boxplot(x, vert=False)
plt.setp(q['boxes'], color=color)
plt.setp(q['whiskers'], color=color)
plt.setp(q['fliers'], color=color, marker=fliermarker)
if l:
plt.setp(ax, yticklabels=l)
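# --- Hedged sketch (editorial addition): a standalone restatement of the
# name-mangled __group helper above -- x values are bucketed under their
# paired y key. The sample values are illustrative only.
def _sketch_group_logic():
    out = {}
    for xx, yy in zip([1, 2, 3, 4], ['a', 'b', 'a', 'b']):
        out.setdefault(yy, []).append(xx)
    assert out == {'a': [1, 3], 'b': [2, 4]}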
| bsd-2-clause |
Winand/pandas | pandas/tests/io/msgpack/test_pack.py | 13 | 4928 | # coding: utf-8
import pytest
import struct
from pandas import compat
from pandas.compat import u, OrderedDict
from pandas.io.msgpack import packb, unpackb, Unpacker, Packer
class TestPack(object):
def check(self, data, use_list=False):
re = unpackb(packb(data), use_list=use_list)
assert re == data
def testPack(self):
test_data = [
0, 1, 127, 128, 255, 256, 65535, 65536,
-1, -32, -33, -128, -129, -32768, -32769,
1.0,
b"", b"a", b"a" * 31, b"a" * 32,
None, True, False,
(), ((),), ((), None,),
{None: 0},
(1 << 23),
]
for td in test_data:
self.check(td)
def testPackUnicode(self):
test_data = [u(""), u("abcd"), [u("defgh")], u("Русский текст"), ]
for td in test_data:
re = unpackb(
packb(td, encoding='utf-8'), use_list=1, encoding='utf-8')
assert re == td
packer = Packer(encoding='utf-8')
data = packer.pack(td)
re = Unpacker(
compat.BytesIO(data), encoding='utf-8', use_list=1).unpack()
assert re == td
def testPackUTF32(self):
test_data = [
compat.u(""),
compat.u("abcd"),
[compat.u("defgh")],
compat.u("Русский текст"),
]
for td in test_data:
re = unpackb(
packb(td, encoding='utf-32'), use_list=1, encoding='utf-32')
assert re == td
def testPackBytes(self):
test_data = [b"", b"abcd", (b"defgh", ), ]
for td in test_data:
self.check(td)
def testIgnoreUnicodeErrors(self):
re = unpackb(
packb(b'abc\xeddef'), encoding='utf-8', unicode_errors='ignore',
use_list=1)
assert re == "abcdef"
def testStrictUnicodeUnpack(self):
pytest.raises(UnicodeDecodeError, unpackb, packb(b'abc\xeddef'),
encoding='utf-8', use_list=1)
def testStrictUnicodePack(self):
pytest.raises(UnicodeEncodeError, packb, compat.u("abc\xeddef"),
encoding='ascii', unicode_errors='strict')
def testIgnoreErrorsPack(self):
re = unpackb(
packb(
compat.u("abcФФФdef"), encoding='ascii',
unicode_errors='ignore'), encoding='utf-8', use_list=1)
assert re == compat.u("abcdef")
def testNoEncoding(self):
pytest.raises(TypeError, packb, compat.u("abc"), encoding=None)
def testDecodeBinary(self):
re = unpackb(packb("abc"), encoding=None, use_list=1)
assert re == b"abc"
def testPackFloat(self):
assert packb(1.0,
use_single_float=True) == b'\xca' + struct.pack('>f', 1.0)
assert packb(
1.0, use_single_float=False) == b'\xcb' + struct.pack('>d', 1.0)
def testArraySize(self, sizes=[0, 5, 50, 1000]):
bio = compat.BytesIO()
packer = Packer()
for size in sizes:
bio.write(packer.pack_array_header(size))
for i in range(size):
bio.write(packer.pack(i))
bio.seek(0)
unpacker = Unpacker(bio, use_list=1)
for size in sizes:
assert unpacker.unpack() == list(range(size))
def test_manualreset(self, sizes=[0, 5, 50, 1000]):
packer = Packer(autoreset=False)
for size in sizes:
packer.pack_array_header(size)
for i in range(size):
packer.pack(i)
bio = compat.BytesIO(packer.bytes())
unpacker = Unpacker(bio, use_list=1)
for size in sizes:
assert unpacker.unpack() == list(range(size))
packer.reset()
assert packer.bytes() == b''
def testMapSize(self, sizes=[0, 5, 50, 1000]):
bio = compat.BytesIO()
packer = Packer()
for size in sizes:
bio.write(packer.pack_map_header(size))
for i in range(size):
bio.write(packer.pack(i)) # key
bio.write(packer.pack(i * 2)) # value
bio.seek(0)
unpacker = Unpacker(bio)
for size in sizes:
assert unpacker.unpack() == dict((i, i * 2) for i in range(size))
def test_odict(self):
seq = [(b'one', 1), (b'two', 2), (b'three', 3), (b'four', 4)]
od = OrderedDict(seq)
assert unpackb(packb(od), use_list=1) == dict(seq)
def pair_hook(seq):
return list(seq)
assert unpackb(
packb(od), object_pairs_hook=pair_hook, use_list=1) == seq
def test_pairlist(self):
pairlist = [(b'a', 1), (2, b'b'), (b'foo', b'bar')]
packer = Packer()
packed = packer.pack_map_pairs(pairlist)
unpacked = unpackb(packed, object_pairs_hook=list)
assert pairlist == unpacked
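# --- Hedged usage sketch (editorial addition): the pack/unpack round trip
# the tests above exercise, in its smallest form (encoding left at the
# default, so byte strings come back as bytes).
def _sketch_roundtrip():
    payload = {b'key': [1, 2.5, None, True]}
    assert unpackb(packb(payload), use_list=1) == payload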
| bsd-3-clause |
lucasosouza/graph-competition | pagerank2.py | 1 | 3831 | import os
import sys
import math
import numpy
import pandas
import pickle
# Generalized matrix operations:
def __extractNodes(matrix):
nodes = set()
for colKey in matrix:
nodes.add(colKey)
for rowKey in matrix.T:
nodes.add(rowKey)
return nodes
def __makeSquare(matrix, keys, default=0.0):
matrix = matrix.copy()
def insertMissingColumns(matrix):
for key in keys:
if not key in matrix:
matrix[key] = pandas.Series(default, index=matrix.index)
return matrix
matrix = insertMissingColumns(matrix) # insert missing columns
matrix = insertMissingColumns(matrix.T).T # insert missing rows
return matrix.fillna(default)
def __ensureRowsPositive(matrix):
matrix = matrix.T
for colKey in matrix:
if matrix[colKey].sum() == 0.0:
matrix[colKey] = pandas.Series(numpy.ones(len(matrix[colKey])), index=matrix.index)
return matrix.T
def __normalizeRows(matrix):
return matrix.div(matrix.sum(axis=1), axis=0)
def __euclideanNorm(series):
return math.sqrt(series.dot(series))
# PageRank specific functionality:
def __startState(nodes):
if len(nodes) == 0: raise ValueError("There must be at least one node.")
startProb = 1.0 / float(len(nodes))
return pandas.Series({node : startProb for node in nodes})
def __integrateRandomSurfer(nodes, transitionProbs, rsp):
alpha = 1.0 / float(len(nodes)) * rsp
return transitionProbs.copy().multiply(1.0 - rsp) + alpha
def powerIteration(transitionWeights, rsp=0.15, epsilon=0.00001, maxIterations=1000):
# Clerical work:
# transitionWeights = pandas.DataFrame(transitionWeights)
nodes = __extractNodes(transitionWeights)
print("transition started")
transitionWeights = __makeSquare(transitionWeights, nodes, default=0.0)
transitionWeights = __ensureRowsPositive(transitionWeights)
print("transition finished")
# Setup:
state = __startState(nodes)
transitionProbs = __normalizeRows(transitionWeights)
transitionProbs = __integrateRandomSurfer(nodes, transitionProbs, rsp)
print("setup finished")
# Power iteration:
for iteration in range(maxIterations):
oldState = state.copy()
state = state.dot(transitionProbs)
delta = state - oldState
# print(delta)
if __euclideanNorm(delta) < epsilon: break
return state
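# --- Hedged sketch (editorial addition): powerIteration on a 3-node cycle
# a -> b -> c -> a, whose PageRank is uniform (1/3 each) regardless of the
# random-surfer probability. Node labels are illustrative only.
def _sketch_pagerank_cycle():
    weights = pandas.DataFrame([[0, 1, 0],
                                [0, 0, 1],
                                [1, 0, 0]],
                               index=['a', 'b', 'c'],
                               columns=['a', 'b', 'c'])
    state = powerIteration(weights)
    assert all(abs(state - 1.0 / 3) < 1e-4)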
def to_matrix(origin_df):
#ceil = 1000
# get unique nodes
source = set(origin_df.index.unique())
target = set(origin_df['1'].unique())
nodes = source | target
# nodes = list(nodes)[:ceil]
# create zero matrix and init dataframe
len_nodes = len(nodes)
# len_nodes = ceil
zero_matrix = numpy.zeros((len_nodes, len_nodes))
new_df = pandas.DataFrame(zero_matrix, index=nodes, columns=nodes, dtype=numpy.int32)
    # iterate over all nodes and build the adjacency matrix
    for i in range(len_nodes):
        # skip nodes that never appear as a source (not in the index)
        if i in source:
            # build the array of target/outgoing nodes
            outgoing_nodes = numpy.array(origin_df.ix[i, '1']).flatten()
            # iterate over the targets
            for j in [var for var in outgoing_nodes]: # if var < ceil]:
                # update the corresponding entry in the matrix
                new_df.ix[i, j] = 1
# print(numpy.bincount(new_df.ix[:, 370] > 0))
return new_df
f = "networks/real2.csv"
# out_f = "input_test.csv"
out_df = to_matrix(pandas.DataFrame.from_csv(f))
# out_df.to_csv(out_f)
# f = "input_test.csv"
# in_df = pandas.DataFrame.from_csv(f, dtypes=numpy.int32)
# in_df = pandas.read_csv(f, dtype=numpy.int32, index_col=0)
# import pdb;pdb.set_trace()
state = powerIteration(out_df)
print(state.order(ascending=False))
with open('page_rank_results.p', 'wb') as handle:
pickle.dump(state, handle)
| mit |
jminyu/PatternRecognition_library | Data_generation.py | 1 | 2150 | __author__ = 'Schmidtz'
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import matplotlib as mpl
from numpy import matlib
from numpy import *
from numpy.random import *
import pylab as p
import math
from scipy import stats, mgrid, c_, reshape, random, rot90, linalg
from Prob_function import *
def genData(Ndat):
c1 = 0.5
r1 = 0.4
r2 = 0.3
# generate enough data to filter
N = 20*Ndat
X = array(random_sample(N))
Y = array(random_sample(N))
X1 = X[(X-c1)*(X-c1) + (Y-c1)*(Y-c1) < r1*r1]
Y1 = Y[(X-c1)*(X-c1) + (Y-c1)*(Y-c1) < r1*r1]
X2 = X1[(X1-c1)*(X1-c1) + (Y1-c1)*(Y1-c1) > r2*r2]
Y2 = Y1[(X1-c1)*(X1-c1) + (Y1-c1)*(Y1-c1) > r2*r2]
X3 = X2[ abs(X2-Y2)>0.05 ]
Y3 = Y2[ abs(X2-Y2)>0.05 ]
#X3 = X2[ X2-Y2>0.15 ]
#Y3 = Y2[ X2-Y2>0.15]
X4=zeros(Ndat, dtype=float32)
Y4=zeros(Ndat, dtype=float32)
for i in xrange(Ndat):
if (X3[i]-Y3[i]) >0.05:
X4[i] = X3[i] + 0.08
Y4[i] = Y3[i] + 0.18
else:
X4[i] = X3[i] - 0.08
Y4[i] = Y3[i] - 0.18
print "X", size(X3[0:Ndat]), "Y", size(Y3)
return(vstack((X4[0:Ndat],Y4[0:Ndat])))
if __name__ == "__main__":
random.seed(12345)
dat = genData(500)
print dat.shape
plt.figure(1)
plt.plot(dat[0,:],dat[1,:],'b.')
sigma_x, mu_x = shoot(dat[0,:])
sigma_y, mu_y = shoot(dat[1,:])
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
delta = 0.025
X, Y = np.meshgrid(dat[0,:],dat[1,:])
G = mlab.bivariate_normal(X, Y, sigma_x, sigma_y, mu_x, mu_y)
# Create a simple contour plot with labels using default colors. The
    # inline argument to clabel will control whether the labels are drawn
    # over the line segments of the contour, removing the lines beneath
    # the label
CS = plt.contour(X, Y, G)
plt.title('Simplest default with labels')
plt.show()
| gpl-3.0 |
untom/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 142 | 18692 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether returned distances are less than `radius`.
    # At least one point should be returned when the `radius` is set
    # to the mean distance from the query point to the other points in
    # the dataset.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)]
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)]
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [1., 0.]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
    # If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
    # size of _fit_X = n_samples + n_samples_partial_fit after insertion
    assert_equal(lshf._fit_X.shape[0],
                 n_samples + n_samples_partial_fit)
    # each tree's original_indices_ grows by the same amount
    assert_equal(len(lshf.original_indices_[0]),
                 n_samples + n_samples_partial_fit)
    # as does each tree itself
    assert_equal(len(lshf.trees_[1]),
                 n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
RalphBariz/RalphsDotNet | Old/RalphsDotNet.Apps.OptimizationStudio/Resources/PyLib/numpy/lib/polynomial.py | 58 | 35930 | """
Functions to operate on polynomials.
"""
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import isscalar, abs, finfo, atleast_1d, hstack
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag
from numpy.linalg import eigvals, lstsq
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
pass
else:
raise ValueError, "input must be 1d or square 2d array."
if len(seq_of_zeros) == 0:
return 1.0
a = [1]
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, [1, -seq_of_zeros[k]], mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0,roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
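# Worked trace of the convolution loop above for roots (1, 2, 3); each step
# multiplies in one factor (x - zero):
#     [1] * [1, -1]        -> [1, -1]
#     [1, -1] * [1, -2]    -> [1, -3, 2]
#     [1, -3, 2] * [1, -3] -> [1, -6, 11, -6]
# i.e. (x - 1)(x - 2)(x - 3) = x**3 - 6*x**2 + 11*x - 6.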
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like of shape(M,)
Rank-1 array of polynomial co-efficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError:
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with
a given sequence of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] Wikipedia, "Companion matrix",
http://en.wikipedia.org/wiki/Companion_matrix
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError,"Input must be a rank-1 array."
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0, :] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
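# Worked sketch of the companion matrix built above: for p = [1, -6, 11, -6]
# (roots 1, 2, 3), A[0, :] = -p[1:] / p[0] gives
#     A = [[ 6., -11.,   6.],
#          [ 1.,   0.,   0.],
#          [ 0.,   1.,   0.]]
# and eigvals(A) recovers the roots {1, 2, 3}.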
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : {array_like, poly1d}
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : {None, list of `m` scalars, scalar}, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError, "Order of integral must be positive (see polyder)"
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError, \
"k must be a scalar or a rank-1 array of length 1 or >m."
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError, "Order of derivative must be positive (see polyint)"
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than this
relative to the largest singular value will be ignored. The default
value is len(x)*eps, where eps is the relative precision of the float
type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is
False (the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is also
returned.
Returns
-------
p : ndarray, shape (M,) or (M, K)
Polynomial coefficients, highest power first.
If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond : present only if `full` = True
Residuals of the least-squares fit, the effective rank of the scaled
Vandermonde coefficient matrix, its singular values, and the specified
value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[n] + ... + x[0] * p[1] + p[0] = y[0]
x[1]**n * p[n] + ... + x[1] * p[1] + p[0] = y[1]
...
x[k]**n * p[n] + ... + x[k] * p[1] + p[0] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError, "expected deg >= 0"
if x.ndim != 1:
raise TypeError, "expected 1D vector for x"
if x.size == 0:
raise TypeError, "expected non-empty vector for x"
if y.ndim < 1 or y.ndim > 2 :
raise TypeError, "expected 1D or 2D array for y"
if x.shape[0] != y.shape[0] :
raise TypeError, "expected x and y to have same length"
# set rcond
if rcond is None :
rcond = len(x)*finfo(x.dtype).eps
# scale x to improve condition number
scale = abs(x).max()
if scale != 0 :
x /= scale
# solve least squares equation for powers of x
v = vander(x, order)
c, resids, rank, s = lstsq(v, y, rcond)
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
# scale returned coefficients
if scale != 0 :
if c.ndim == 1 :
c /= vander([scale], order)[0]
else :
c /= vander([scale], order).T
if full :
return c, resids, rank, s, rcond
else :
return c
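# Note on the solve above: for deg = 2, vander(x, 3) has rows
# [x_i**2, x_i, 1], so lstsq returns the coefficients highest power first,
# matching polyval's convention.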
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = x * y + p[i]
return y
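# Horner trace for the loop above with p = [3, 0, 1] and x = 5:
#     y = 0
#     y = 5*0  + 3 = 3
#     y = 5*3  + 0 = 15
#     y = 5*15 + 1 = 76    (matches the docstring example)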
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1,a2 = poly1d(a1),poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
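# Worked trace of the division loop above for u = [3., 5., 2.] and
# v = [2., 1.], i.e. (3x**2 + 5x + 2) / (2x + 1):
#     scale = 0.5
#     k = 0: d = 1.5,  q[0] = 1.5,  r -> [0., 3.5, 2.]
#     k = 1: d = 1.75, q[1] = 1.75, r -> [0., 0., 0.25]
# after stripping the leading zeros of r: q = [1.5, 1.75], r = [0.25].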
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while 1:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2)+len(toadd2) > wrap) or \
(len(line1)+len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print p
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError, "Polynomial must be 1d only."
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError, "Power to non-negative integers only."
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
return NX.alltrue(self.coeffs == other.coeffs)
def __ne__(self, other):
return NX.any(self.coeffs != other.coeffs)
def __setattr__(self, key, val):
raise ValueError, "Attributes cannot be changed this way."
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c','coef','coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError("'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError, "Does not support negative powers."
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always',RankWarning)
| gpl-3.0 |
shakamunyi/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py | 62 | 5053 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
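  """Stack the rows of `array` selected by `row_indices` into one 2-D array."""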
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff.enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff.enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
alphacsc/alphacsc | examples/other/plot_simulate_swm.py | 1 | 3342 | """
=====================
SWM on simulated data
=====================
This example shows how the sliding window method (SWM) [1]
works on simulated data. The code is adapted from the
`neurodsp package <https://github.com/voytekresearch/neurodsp/>`_
from Voytek lab. Note that, at present, it does not
implement parallel tempering.
[1] Gips, Bart, et al.
Discovering recurring patterns in electrophysiological recordings.
Journal of neuroscience methods 275 (2017): 66-79.
"""
# Authors: Scott Cole
# Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
###############################################################################
# Let us define the model parameters
n_times_atom = 64 # L
n_times = 5000 # T
n_trials = 10 # N
###############################################################################
# The algorithm does not naturally lend itself to multiple atoms. Therefore,
# we simulate only one atom.
n_atoms = 1 # K
###############################################################################
# A minimum spacing between the windows being averaged must be enforced.
min_spacing = 200 # G
###############################################################################
# Now, we can simulate
from alphacsc import check_random_state
from alphacsc.simulate import simulate_data
random_state_simulate = 1
X, ds_true, z_true = simulate_data(n_trials, n_times, n_times_atom,
n_atoms, random_state_simulate,
constant_amplitude=True)
rng = check_random_state(random_state_simulate)
X += 0.01 * rng.randn(*X.shape)
###############################################################################
# We expect 10 occurrences of the atom in total.
# So, let us define 10 random locations for the algorithm to start with.
# If this number is not known, we will end up estimating more or fewer windows.
import numpy as np
window_starts = rng.choice(np.arange(n_trials * n_times), size=n_trials)
###############################################################################
# Now, we apply the SWM algorithm.
from alphacsc.other.swm import sliding_window_matching
random_state = 42
X = X.reshape(X.shape[0] * X.shape[1]) # expects 1D time series
d_hat, window_starts, J = sliding_window_matching(
X, L=n_times_atom, G=min_spacing, window_starts_custom=window_starts,
max_iterations=10000, T=0.01, random_state=random_state)
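# `d_hat` is the recovered window of length ``n_times_atom`` and
# `window_starts` holds the final window positions (one per initial start,
# assuming the implementation keeps the window count fixed); `J` traces the
# cost over the iterations, as plotted at the end of this example.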
###############################################################################
# Let us look at the data at the time windows when the atoms are found.
import matplotlib.pyplot as plt
fig, axes = plt.subplots(2, n_trials // 2, sharex=True, sharey=True,
figsize=(15, 3))
axes = axes.ravel()
for ax, w_start in zip(axes, window_starts):
ax.plot(X[w_start:w_start + n_times_atom])
###############################################################################
# It is not perfect, but it does find time windows where the atom
# is present. Now let us plot the atoms.
plt.figure()
plt.plot(d_hat / np.linalg.norm(d_hat))
plt.plot(ds_true.T, '--')
###############################################################################
# and the cost function over iterations
plt.figure()
plt.plot(J)
plt.ylabel('Cost function J')
plt.xlabel('Iteration #')
plt.show()
| bsd-3-clause |
aceofwings/Evt-Gateway | gateway/can/engine.py | 1 | 11613 | from gateway.utils.resourcelocator import ResourceLocator
from gateway.can.traffic.server import Server, CoreHandler
from gateway.can.traffic.reciever import Receiver
from gateway.can.traffic.message import CanMessage
from gateway.can.control.errorhandler import ErrorHandler
from gateway.can.control.noticehandler import NoticeHandler
from gateway.can.control.notices import NewConnection
from gateway.can.control.errors import ApplicationSocketClosed, CannotEstablishConnection
from gateway.core.server import Server as ServiceServer
from gateway.core.application import Application
from gateway.launchers.controlerize import load_controllers
from gateway.can.traffic.canout import CanOutlet, StandAloneCanOutlet
import time
import socket
import struct
import os
import sys
import logging
import threading
import queue
import json
"""
CAN Engine|
-----------
Daemon responsible for polling CAN Buses for incoming messages
and relaying to Core.
Nicholas and Federico
Amatruda and Rueda
"""
APP = "APPLICATION"
SERVER = "SERVER"
PY_PLOT = "PYPLOT"
STAND_ALONE = "STANDALONE"
logger = logging.getLogger(__name__)
defautOptions = {'interfaces' : {}}
class Engine(object):
conf = None
can_outs = {}
engine_server = None
notices = queue.Queue()
core_class = None
outlet = CanOutlet
core_service = True
core = None
receivers = []
def __init__(self, *args, **options):
options = {**defautOptions, **options}
super().__init__()
self.load_engine(options['interfaces'])
self.establish_core(Server,options)
self.core = self.core_class(self)
self.start_recievers()
def start_recievers(self):
for receiver in self.receivers:
receiver.start()
def establish_core(self,server_cls,options):
if self.core_service:
tempfolder = ResourceLocator.get_locator(relative_path="temp")
if 'core_address' not in options:
full_path = tempfolder.fetch_file_path('core.out')
else:
full_path = tempfolder.fetch_file_path(options['core_address'])
try:
os.unlink(full_path)
except OSError:
if os.path.exists(full_path):
print("The path exists")
try:
self.engine_server = server_cls(full_path, CoreHandler)
self.server_thread = threading.Thread(target=self.engine_server.serve_forever)
self.engine_server.engine = self
except socket.error as msg:
pass
except OSError as msg:
print(msg)
def load_engine(self, interfaces):
self.error_handler = ErrorHandler(self, **{'force_send' : True})
self.notice_handler = NoticeHandler(self)
try:
import controllers
print("loading controllers")
load_controllers(controllers)
except ImportError:
print("No Controllers Found")
def start(self):
self.server_thread.start()
while True:
try:
engine_msg = self.notices.get()
if issubclass(type(engine_msg), Exception):
self.error_handler.handle_error(engine_msg)
else:
self.notice_handler.handle_notice(engine_msg)
except queue.Empty as msg:
print(msg)
def shutdown(self):
self.engine_server.shutdown()
current_thread = threading.current_thread()
for thread in threading.enumerate():
if thread is not current_thread and hasattr(thread,"service"):
thread.clean_up()
thread.join()
thread.clean_up_r()
sys.exit(1)
"""
Daemon takes messages from the outgoing_buffer
JSON string is converted to bytes and sent across CAN socket
"""
def COREreceive(self,message):
"""
Encoding functions go in here
"""
return CanMessage.from_JSON(message)
def COREsend(self,message):
"""
Decoding functions go in here
Subclass for more functionality
"""
return message.to_JSON().encode()
def Corenotice(self,message):
pass
def engine_notice(self,notice):
self.queue_notice(notice)
def engine_error(self,error):
self.queue_notice(error)
def queue_notice(self,notice):
try:
self.notices.put(notice)
except queue.Full as msg:
self.notifyEngine()
def force_send(self,msg):
pass
def notifyEngine(self):
pass
@classmethod
def getEngineType(cls,engine_type):
"""
return an application engine or server engine
"""
if engine_type == SERVER:
return ServerEngine
elif engine_type == APP:
return ApplicationEngine
elif engine_type == PY_PLOT:
return GraphicApplicationEngine
elif engine_type == STAND_ALONE:
return StandAloneApplicationEngine
else:
return None
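# Example (assumed usage): pick an engine class for a server node and run it.
# The interface mapping and max_connections value are illustrative only.
#
#   engine_cls = Engine.getEngineType(SERVER)
#   engine = engine_cls(interfaces={'can0': 'can'}, max_connections=5)
#   engine.start()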
class ServerEngine(Engine):
applications = set()
client_lock = threading.RLock()
core_class = ServiceServer
def __init__(self,*args, **options):
super().__init__(*args,**options)
self.max_connections = options['max_connections']
def COREreceive(self,message):
message = super().COREreceive(message)
self.core.handleMessage(message)
def COREsend(self,message):
enc_msg = super().COREsend(message)
with self.client_lock:
for application in self.applications:
try:
self.engine_server.socket.sendto(enc_msg, application)
except socket.error as servererror:
                    if servererror.errno == errno.ECONNREFUSED:
self.engine_error(ApplicationSocketClosed(application))
def load_engine(self, interfaces):
super().load_engine(interfaces)
for address, interfaceType in interfaces.items():
receiver = Receiver((address, interfaceType), self)
self.receivers.append(receiver)
self.can_outs[interfaceType] = receiver.socket_descriptor
def COREnotice(self,message):
pass
def COREerror(self,message):
pass
class StandAloneApplicationEngine(Engine):
core_class = Application
interface_types = []
def __init__(self,*args, **options):
self.outlet = StandAloneCanOutlet
self.core_service = False
super().__init__(*args,**options)
"""
For an Application that is not standalone interfaces are actually just expected types to be handled from
the incoming core
"""
def load_engine(self,interfaces):
super().load_engine(interfaces)
for address, interfaceType in interfaces.items():
receiver = Receiver((address, interfaceType), self)
self.receivers.append(receiver)
self.can_outs[interfaceType] = receiver.socket_descriptor
def start(self):
while True:
try:
engine_msg = self.notices.get()
if issubclass(type(engine_msg), Exception):
self.error_handler.handle_error(engine_msg)
else:
self.notice_handler.handle_notice(engine_msg)
except queue.Empty as msg:
print(msg)
def connect_to_server(self,server_address):
self.COREnotice(NewConnection(self.engine_server.socket.getsockname()),server_address)
def COREreceive(self,message):
self.core.handleMessage(message)
def COREsend(self,message):
pass
def COREnotice(self,message,server_address = None):
if server_address is not None:
try:
                self.engine_server.socket.sendto(message.to_JSON().encode(),server_address)
except FileNotFoundError:
self.engine_error(CannotEstablishConnection(server_address))
def COREerror(self,message):
pass
def shutdown(self):
current_thread = threading.current_thread()
for thread in threading.enumerate():
if thread is not current_thread and hasattr(thread,"service"):
thread.clean_up()
thread.join()
thread.clean_up_r()
sys.exit(1)
class ApplicationEngine(Engine):
core_class = Application
interface_types = []
def __init__(self,*args, **options):
super().__init__(*args,**options)
self.connect_to_server(options["server"])
"""
For an Application that is not standalone interfaces are actually just expected types to be handled from
the incoming core
"""
def load_engine(self,interfaces):
super().load_engine(interfaces)
for address, interfaceType in interfaces.items():
self.interface_types.append(interfaceType)
def connect_to_server(self,server_address):
self.COREnotice(NewConnection(self.engine_server.socket.getsockname()),server_address)
def COREreceive(self,message):
message = super().COREreceive(message)
self.core.handleMessage(message)
def COREsend(self,message):
pass
def COREnotice(self,message,server_address = None):
if server_address is not None:
try:
                self.engine_server.socket.sendto(message.to_JSON().encode(),server_address)
except FileNotFoundError:
self.engine_error(CannotEstablishConnection(server_address))
def COREerror(self,message):
pass
class GraphicApplicationEngine(Engine):
core_class = Application
interface_types = []
def __init__(self,*args, **options):
super().__init__(*args,**options)
        # self.core is already created by super().__init__ via core_class
self.connect_to_server(options["server"])
"""
For an Application that is not standalone interfaces are actually just expected types to be handled from
the incoming core
"""
def load_engine(self,interfaces):
super().load_engine(interfaces)
for address, interfaceType in interfaces.items():
self.interface_types.append(interfaceType)
def start(self):
self.server_thread.start()
from matplotlib import pyplot, animation
while True:
pyplot.show()
pyplot.pause(0.0001)
try:
engine_msg = self.notices.get_nowait()
                if issubclass(type(engine_msg), Exception):
self.error_handler.handle_error(engine_msg)
else:
self.notice_handler.handle_notice(engine_msg)
except queue.Empty as msg:
pass
            except Exception:
                # keep the GUI loop alive on unexpected errors
                pass
def connect_to_server(self,server_address):
self.COREnotice(NewConnection(self.engine_server.socket.getsockname()),server_address)
def COREreceive(self,message):
message = super().COREreceive(message)
self.core.handleMessage(message)
def COREsend(self,message):
pass
def COREnotice(self,message,server_address = None):
if server_address is not None:
try:
                self.engine_server.socket.sendto(message.to_JSON().encode(),server_address)
except FileNotFoundError:
self.engine_error(CannotEstablishConnection(server_address))
def COREerror(self,message):
pass
| mit |
piyueh/PetIBM | examples/navierstokes/liddrivencavity2dRe5000/scripts/plotCenterlineVelocities.py | 2 | 3779 | """
Plots the velocities along the centerlines of the 2D cavity at Reynolds number
5000 and compares with the numerical data reported in Ghia et al. (1982).
_References:_
* Ghia, U. K. N. G., Ghia, K. N., & Shin, C. T. (1982).
High-Re solutions for incompressible flow using the Navier-Stokes equations
and a multigrid method.
Journal of computational physics, 48(3), 387-411.
"""
import os
import pathlib
import numpy
import h5py
from matplotlib import pyplot
# User's parameters
Re = 5000.0 # Reynolds number
time_step = 60000 # Time step at which to read the solution
# End of user's parameters
simu_dir = pathlib.Path(__file__).absolute().parents[1]
data_dir = simu_dir / 'data'
root_dir = os.environ.get('PETIBM_EXAMPLES')
if not root_dir:
    root_dir = simu_dir.parents[1]
else:
    root_dir = pathlib.Path(root_dir)
def get_gridline_velocity(x_target, u, x, axis=0):
    """Linearly interpolate the velocity at the target gridline location."""
    # index of the last gridline before the target location
    i = numpy.where(x < x_target)[0][-1]
    x_a, x_b = x[i], x[i + 1]
    if axis == 0:
        u_a, u_b = u[:, i], u[:, i + 1]
    elif axis == 1:
        u_a, u_b = u[i], u[i + 1]
    return (u_a * (x_b - x_target) + u_b * (x_target - x_a)) / (x_b - x_a)
def read_data_ghia_et_al_1982(filepath, Re):
with open(filepath, 'r') as infile:
data = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True)
re2col = {100.0: (1, 7), 1000.0: (2, 8), 3200.0: (3, 9), 5000.0: (4, 10),
10000.0: (5, 11)}
return {'vertical': {'y': data[0], 'u': data[re2col[Re][0]]},
'horizontal': {'x': data[6], 'v': data[re2col[Re][1]]}}
def read_field_hdf5(name, fieldpath, gridpath):
    """Read the gridlines and values of a field from HDF5 files."""
    field = {}
    with h5py.File(gridpath, 'r') as f:
        field['x'], field['y'] = f[name]['x'][:], f[name]['y'][:]
    with h5py.File(fieldpath, 'r') as f:
        field['values'] = f[name][:]
    return field
# Reads data from Ghia et al. (1982).
filepath = root_dir / 'data' / 'ghia_et_al_1982_lid_driven_cavity.dat'
ghia = read_data_ghia_et_al_1982(filepath, Re)
# Reads gridlines and velocity fields.
gridpath = data_dir / 'grid.h5'
filepath = data_dir / '{:0>7}.h5'.format(time_step)
u = read_field_hdf5('u', filepath, gridpath)
v = read_field_hdf5('v', filepath, gridpath)
# Computes x-velocity along vertical gridline at mid-cavity.
x_target = 0.5
u['vertical'] = get_gridline_velocity(x_target, u['values'], u['x'], axis=0)
# Computes y-velocity along horizontal gridline at mid-cavity.
y_target = 0.5
v['horizontal'] = get_gridline_velocity(y_target, v['values'], v['y'], axis=1)
pyplot.rc('font', family='serif', size=16)
# Plots the centerline velocities.
simu_kwargs = {'label': 'PetIBM',
'color': '#336699', 'linestyle': '-', 'linewidth': 3,
'zorder': 10}
ghia_kwargs = {'label': 'Ghia et al. (1982)',
'color': '#993333', 'linewidth': 0,
'markeredgewidth': 2, 'markeredgecolor': '#993333',
'markerfacecolor': 'none',
'marker': 'o', 'markersize': 8,
'zorder': 10}
fig, ax = pyplot.subplots(nrows=2, figsize=(8.0, 8.0))
fig.suptitle('Re = {}'.format(int(Re)))
ax[0].grid()
ax[0].set_xlabel('y')
ax[0].set_ylabel('u (x={})'.format(x_target))
ax[0].plot(u['y'], u['vertical'], **simu_kwargs)
ax[0].plot(ghia['vertical']['y'], ghia['vertical']['u'], **ghia_kwargs)
ax[0].axis((0.0, 1.0, -0.75, 1.25))
ax[0].legend(loc='upper left')
ax[1].grid()
ax[1].set_xlabel('x')
ax[1].set_ylabel('v (y={})'.format(y_target))
ax[1].plot(v['x'], v['horizontal'], **simu_kwargs)
ax[1].plot(ghia['horizontal']['x'], ghia['horizontal']['v'], **ghia_kwargs)
ax[1].axis((0.0, 1.0, -0.75, 1.25))
ax[1].legend(loc='upper left')
pyplot.show()
# Save figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'centerlineVelocities{:0>7}.png'.format(time_step)
fig.savefig(str(filepath), dpi=300)
| bsd-3-clause |
fernandezcuesta/pySMSCMon | t4mon/__init__.py | 2 | 2990 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# isort:skip_file
import os
import sys
import matplotlib # isort:skip
# Set matplotlib's backend before first import of pyplot or pylab,
# Qt4 doesn't like threads
# TODO: unify matplotlib backends
if os.name == 'posix':
matplotlib.use('Cairo')
else:
matplotlib.use('TkAgg')
import six.moves.tkinter_filedialog # Required when using TkAgg backend
from t4mon._version import get_versions
from t4mon.collector import add_methods_to_pandas_dataframe, read_pickle
from t4mon.gen_plot import plot_var
from t4mon.gen_report import gen_report
from t4mon.logger import init_logger
from t4mon.collector import Collector
from t4mon.orchestrator import Orchestrator
from t4mon import arguments
__version__ = get_versions()['version']
__author__ = 'fernandezjm'
__all__ = ('main',
'dump_config')
del get_versions
def dump_config(output=None, **kwargs):
"""
Dump current configuration to screen, useful for creating a new
``settings.cfg`` file
Arguments:
output (Optional[str]): output filename, stdout if None
"""
conf = arguments.read_config(**kwargs)
conf.write(output or sys.stdout)
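# Example (assumed): write the current configuration to a new settings file.
#
#   with open('settings.cfg', 'w') as cfg_file:
#       dump_config(output=cfg_file)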
def main(): # pragma: no cover
"""
Get input arguments and pass it to Orchestrator
"""
sys_arguments = sys.argv[1:]
arguments_ = arguments._parse_arguments_cli(sys_arguments)
if arguments_.get('config', False):
dump_config(**arguments_)
return
for par in ['local', 'localcsv']:
if arguments_.get(par, False):
sys_arguments.remove('--{0}'.format(par))
create_reports_from_local(
sys_arguments,
prog='{0} --{1}'.format(sys.argv[0], par),
                pkl=(par == 'local')
)
return
arguments_ = arguments._parse_arguments_main(
sys_arguments if sys_arguments else ['--dummy']
)
_orchestrator = Orchestrator(**arguments_)
_orchestrator.start()
# def gui(): # pragma: no cover
# """ Graphical interface for main """
# sys_arguments = sys.argv[1:]
# arguments_ = arguments._parse_arguments_gui(sys_arguments)
# _orchestrator = Orchestrator(**arguments_)
# _orchestrator.start()
def create_reports_from_local(cli_arguments,
prog=None,
pkl=True): # pragma: no cover
"""
Create HTML reports from locally stored data
"""
arguments_ = arguments._parse_arguments_local(cli_arguments,
prog=prog,
pkl=pkl)
_orchestrator = Orchestrator(**arguments_)
argument_file_name = '{0}_file'.format('pkl' if pkl else 'csv')
_orchestrator.create_reports_from_local(arguments_.pop(argument_file_name),
pkl=pkl,
**arguments_)
if __name__ == "__main__":
main()
| mit |
blaisb/cfdemUtilities | mixing/pressureAnalysis/compareForcesPressure.py | 2 | 3267 | ##################################################################################################
#
# File : compareForcesPressure.py
#
# Run Instructions : python compareForcesPressure.py directory/with/the/pressures Velocity
#
# Author : Bruno Blais
#
#   Description :   This script takes all the pressure files for velocity N in a folder,
#                   filters them using a low-pass Butterworth filter,
#                   then plots them in a single comparison
#
#
##################################################################################################
#Python imports
#----------------
import os
import sys
import numpy
import time
import matplotlib.pyplot as plt
from scipy.signal import butter, lfilter,freqz
from itertools import cycle
#----------------
#********************************
# OPTIONS AND USER PARAMETERS
#********************************
pdf=True
plotRaw=False
cutoff=0.300
fs=5.
filterOrder=3
#*******************************
# List of forces
#*******************************
forces={"std" : "Standard ",
"lift" : "Lift Force",
#"drag" : "Only Drag",
"visc" : "No Visc force",
"p" : "No Pressure force"}
suffix="Forc_1_1e8_0.90_148700_"
# Figures parameters
plt.rcParams['figure.figsize'] = 12, 9
params = {'backend': 'ps',
'axes.labelsize': 24,
'text.fontsize': 20,
'legend.fontsize': 19,
'xtick.labelsize': 20,
'ytick.labelsize': 20,
'text.usetex': True,
}
plt.rcParams.update(params)
lines = ["-.","-","--",":"]
lineCycler = cycle(lines)
colors = ["g","c","r","k"]
colorCycler = cycle(colors)
#=======================
# Functions
#=======================
def butter_lowpass(cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = butter(order, normal_cutoff, btype='low', analog=False)
return b, a
def butter_lowpass_filter(data, cutoff, fs, order=5):
b, a = butter_lowpass(cutoff, fs, order=order)
y = lfilter(b, a, data)
return y
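# Example (illustrative): smoothing a noisy synthetic signal with the helpers
# above; the signal values are assumptions for demonstration only.
#
#   t_demo = numpy.linspace(0, 100, 501)          # sampled at fs = 5 Hz
#   raw = numpy.sin(2*numpy.pi*0.05*t_demo) + numpy.random.normal(0, 0.1, 501)
#   smooth = butter_lowpass_filter(raw, cutoff, fs, order=filterOrder)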
#======================
# MAIN
#======================
# Directory to work within
try:
	folder=sys.argv[1]
except IndexError:
	print "You need a folder argument"
	sys.exit(1)
# Velocity to analyse
try:
	N=sys.argv[2]
except IndexError:
	print "You need a velocity argument"
	sys.exit(1)
#Initiate figure
ax=plt.figure("Data and filter")
axp = ax.add_subplot(111)
plt.ylabel('Pressure at the bottom [Pa] ')
plt.xlabel('Time [s]')
# Loop through all times
for i in forces:
print "Opening ", i
t,p = numpy.loadtxt(folder+"/"+i+ suffix+ str(N), unpack=True,comments="#")
sortIndex=numpy.argsort(t)
t=t[sortIndex]
pS=p[sortIndex]
pF=butter_lowpass_filter(pS,cutoff,fs,order=filterOrder)
if (plotRaw): axp.plot(t, pS,'ko', label='Brute signal - ' +N+ ' RPM'+i,mfc='none')
axp.plot(t, pF,next(lineCycler),color=next(colorCycler), label=forces[i],linewidth=5)
#box = axp.get_position()
#axp.set_position([box.x0, box.y0, box.width * 0.9, box.height])
#plt.legend(loc='center left', bbox_to_anchor=(1., 0.5))
plt.legend(loc='best')
#plt.ylim([-10,550])
#plt.xlim([0,200])
axp.grid(b=True, which='major', color='k', linestyle='--')
if (pdf): plt.savefig("./compareForces_"+str(N)+".pdf")
plt.show()
| lgpl-3.0 |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tseries/holiday.py | 5 | 16279 | import warnings
from pandas import DateOffset, DatetimeIndex, Series, Timestamp
from pandas.compat import add_metaclass
from datetime import datetime, timedelta
from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU # noqa
from pandas.tseries.offsets import Easter, Day
import numpy as np
def next_monday(dt):
"""
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday, use Monday instead
"""
if dt.weekday() == 5:
return dt + timedelta(2)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_monday_or_tuesday(dt):
"""
For second holiday of two adjacent ones!
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday or Monday, use following Tuesday instead
(because Monday is already taken by adjacent holiday on the day before)
"""
dow = dt.weekday()
if dow == 5 or dow == 6:
return dt + timedelta(2)
elif dow == 0:
return dt + timedelta(1)
return dt
def previous_friday(dt):
"""
If holiday falls on Saturday or Sunday, use previous Friday instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt - timedelta(2)
return dt
def sunday_to_monday(dt):
"""
If holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 6:
return dt + timedelta(1)
return dt
def weekend_to_monday(dt):
"""
If holiday falls on Sunday or Saturday,
use day thereafter (Monday) instead.
Needed for holidays such as Christmas observation in Europe
"""
if dt.weekday() == 6:
return dt + timedelta(1)
elif dt.weekday() == 5:
return dt + timedelta(2)
return dt
def nearest_workday(dt):
"""
If holiday falls on Saturday, use day before (Friday) instead;
if holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_workday(dt):
"""
returns next weekday used for observances
"""
dt += timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt += timedelta(days=1)
return dt
def previous_workday(dt):
"""
returns previous weekday used for observances
"""
dt -= timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt -= timedelta(days=1)
return dt
def before_nearest_workday(dt):
"""
returns previous workday after nearest workday
"""
return previous_workday(nearest_workday(dt))
def after_nearest_workday(dt):
"""
returns next workday after nearest workday
needed for Boxing day or multiple holidays in a series
"""
return next_workday(nearest_workday(dt))
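# Example (assumed): the helpers above compose into observance rules, e.g. a
# Boxing Day holiday observed on the next workday after the nearest workday:
#
#   BoxingDay = Holiday('Boxing Day', month=12, day=26,
#                       observance=after_nearest_workday)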
class Holiday(object):
"""
Class that defines a holiday with start/end dates and rules
for observance.
"""
def __init__(self, name, year=None, month=None, day=None, offset=None,
observance=None, start_date=None, end_date=None,
days_of_week=None):
"""
Parameters
----------
name : str
Name of the holiday , defaults to class name
offset : array of pandas.tseries.offsets or
class from pandas.tseries.offsets
computes offset from date
observance: function
computes when holiday is given a pandas Timestamp
days_of_week:
provide a tuple of days e.g (0,1,2,3,) for Monday Through Thursday
Monday=0,..,Sunday=6
Examples
--------
>>> from pandas.tseries.holiday import Holiday, nearest_workday
>>> from pandas import DateOffset
>>> from dateutil.relativedelta import MO
        >>> USMemorialDay = Holiday('MemorialDay', month=5, day=31,
                    offset=DateOffset(weekday=MO(-1)))
>>> USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=DateOffset(weekday=MO(1)))
>>> July3rd = Holiday('July 3rd', month=7, day=3,)
>>> NewYears = Holiday('New Years Day', month=1, day=1,
                               observance=nearest_workday)
>>> July3rd = Holiday('July 3rd', month=7, day=3,
days_of_week=(0, 1, 2, 3))
"""
if offset is not None and observance is not None:
raise NotImplementedError("Cannot use both offset and observance.")
self.name = name
self.year = year
self.month = month
self.day = day
self.offset = offset
self.start_date = Timestamp(
start_date) if start_date is not None else start_date
self.end_date = Timestamp(
end_date) if end_date is not None else end_date
self.observance = observance
assert (days_of_week is None or type(days_of_week) == tuple)
self.days_of_week = days_of_week
def __repr__(self):
info = ''
if self.year is not None:
info += 'year={year}, '.format(year=self.year)
info += 'month={mon}, day={day}, '.format(mon=self.month, day=self.day)
if self.offset is not None:
info += 'offset={offset}'.format(offset=self.offset)
if self.observance is not None:
info += 'observance={obs}'.format(obs=self.observance)
repr = 'Holiday: {name} ({info})'.format(name=self.name, info=info)
return repr
def dates(self, start_date, end_date, return_name=False):
"""
Calculate holidays observed between start date and end date
Parameters
----------
start_date : starting date, datetime-like, optional
end_date : ending date, datetime-like, optional
return_name : bool, optional, default=False
If True, return a series that has dates and holiday names.
False will only return dates.
"""
start_date = Timestamp(start_date)
end_date = Timestamp(end_date)
filter_start_date = start_date
filter_end_date = end_date
if self.year is not None:
dt = Timestamp(datetime(self.year, self.month, self.day))
if return_name:
return Series(self.name, index=[dt])
else:
return [dt]
dates = self._reference_dates(start_date, end_date)
holiday_dates = self._apply_rule(dates)
if self.days_of_week is not None:
holiday_dates = holiday_dates[np.in1d(holiday_dates.dayofweek,
self.days_of_week)]
if self.start_date is not None:
filter_start_date = max(self.start_date.tz_localize(
filter_start_date.tz), filter_start_date)
if self.end_date is not None:
filter_end_date = min(self.end_date.tz_localize(
filter_end_date.tz), filter_end_date)
holiday_dates = holiday_dates[(holiday_dates >= filter_start_date) &
(holiday_dates <= filter_end_date)]
if return_name:
return Series(self.name, index=holiday_dates)
return holiday_dates
def _reference_dates(self, start_date, end_date):
"""
Get reference dates for the holiday.
Return reference dates for the holiday also returning the year
prior to the start_date and year following the end_date. This ensures
that any offsets to be applied will yield the holidays within
the passed in dates.
"""
if self.start_date is not None:
start_date = self.start_date.tz_localize(start_date.tz)
if self.end_date is not None:
end_date = self.end_date.tz_localize(start_date.tz)
year_offset = DateOffset(years=1)
reference_start_date = Timestamp(
datetime(start_date.year - 1, self.month, self.day))
reference_end_date = Timestamp(
datetime(end_date.year + 1, self.month, self.day))
# Don't process unnecessary holidays
dates = DatetimeIndex(start=reference_start_date,
end=reference_end_date,
freq=year_offset, tz=start_date.tz)
return dates
def _apply_rule(self, dates):
"""
Apply the given offset/observance to a DatetimeIndex of dates.
Parameters
----------
dates : DatetimeIndex
Dates to apply the given offset/observance rule
Returns
-------
Dates with rules applied
"""
if self.observance is not None:
return dates.map(lambda d: self.observance(d))
if self.offset is not None:
if not isinstance(self.offset, list):
offsets = [self.offset]
else:
offsets = self.offset
for offset in offsets:
# if we are adding a non-vectorized value
# ignore the PerformanceWarnings:
with warnings.catch_warnings(record=True):
dates += offset
return dates
holiday_calendars = {}
def register(cls):
try:
name = cls.name
    except AttributeError:
name = cls.__name__
holiday_calendars[name] = cls
def get_calendar(name):
"""
Return an instance of a calendar based on its name.
Parameters
----------
name : str
Calendar name to return an instance of
"""
return holiday_calendars[name]()
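# Example (assumed): calendars register themselves through the metaclass
# below, so an instance can be retrieved by class name.
#
#   cal = get_calendar('USFederalHolidayCalendar')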
class HolidayCalendarMetaClass(type):
def __new__(cls, clsname, bases, attrs):
calendar_class = super(HolidayCalendarMetaClass, cls).__new__(
cls, clsname, bases, attrs)
register(calendar_class)
return calendar_class
@add_metaclass(HolidayCalendarMetaClass)
class AbstractHolidayCalendar(object):
"""
Abstract interface to create holidays following certain rules.
"""
__metaclass__ = HolidayCalendarMetaClass
rules = []
start_date = Timestamp(datetime(1970, 1, 1))
end_date = Timestamp(datetime(2030, 12, 31))
_cache = None
def __init__(self, name=None, rules=None):
"""
Initializes holiday object with a given set a rules. Normally
classes just have the rules defined within them.
Parameters
----------
name : str
Name of the holiday calendar, defaults to class name
rules : array of Holiday objects
A set of rules used to create the holidays.
"""
super(AbstractHolidayCalendar, self).__init__()
if name is None:
name = self.__class__.__name__
self.name = name
if rules is not None:
self.rules = rules
def rule_from_name(self, name):
for rule in self.rules:
if rule.name == name:
return rule
return None
def holidays(self, start=None, end=None, return_name=False):
"""
Returns a curve with holidays between start_date and end_date
Parameters
----------
start : starting date, datetime-like, optional
end : ending date, datetime-like, optional
return_name : bool, optional
If True, return a series that has dates and holiday names.
False will only return a DatetimeIndex of dates.
Returns
-------
DatetimeIndex of holidays
"""
if self.rules is None:
raise Exception('Holiday Calendar {name} does not have any '
'rules specified'.format(name=self.name))
if start is None:
start = AbstractHolidayCalendar.start_date
if end is None:
end = AbstractHolidayCalendar.end_date
start = Timestamp(start)
end = Timestamp(end)
holidays = None
# If we don't have a cache or the dates are outside the prior cache, we
# get them again
if (self._cache is None or start < self._cache[0] or
end > self._cache[1]):
for rule in self.rules:
rule_holidays = rule.dates(start, end, return_name=True)
if holidays is None:
holidays = rule_holidays
else:
holidays = holidays.append(rule_holidays)
self._cache = (start, end, holidays.sort_index())
holidays = self._cache[2]
holidays = holidays[start:end]
if return_name:
return holidays
else:
return holidays.index
@staticmethod
def merge_class(base, other):
"""
Merge holiday calendars together. The base calendar
will take precedence to other. The merge will be done
based on each holiday's name.
Parameters
----------
base : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
other : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
"""
try:
other = other.rules
        except AttributeError:
pass
if not isinstance(other, list):
other = [other]
other_holidays = dict((holiday.name, holiday) for holiday in other)
try:
base = base.rules
        except AttributeError:
pass
if not isinstance(base, list):
base = [base]
base_holidays = dict([(holiday.name, holiday) for holiday in base])
other_holidays.update(base_holidays)
return list(other_holidays.values())
def merge(self, other, inplace=False):
"""
Merge holiday calendars together. The caller's class
rules take precedence. The merge will be done
based on each holiday's name.
Parameters
----------
other : holiday calendar
inplace : bool (default=False)
If True set rule_table to holidays, else return array of Holidays
"""
holidays = self.merge_class(self, other)
if inplace:
self.rules = holidays
else:
return holidays
USMemorialDay = Holiday('MemorialDay', month=5, day=31,
offset=DateOffset(weekday=MO(-1)))
USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=DateOffset(weekday=MO(1)))
USColumbusDay = Holiday('Columbus Day', month=10, day=1,
offset=DateOffset(weekday=MO(2)))
USThanksgivingDay = Holiday('Thanksgiving', month=11, day=1,
offset=DateOffset(weekday=TH(4)))
USMartinLutherKingJr = Holiday('Dr. Martin Luther King Jr.',
start_date=datetime(1986, 1, 1), month=1, day=1,
offset=DateOffset(weekday=MO(3)))
USPresidentsDay = Holiday("President's Day", month=2, day=1,
offset=DateOffset(weekday=MO(3)))
GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)])
EasterMonday = Holiday("Easter Monday", month=1, day=1,
offset=[Easter(), Day(1)])
class USFederalHolidayCalendar(AbstractHolidayCalendar):
"""
US Federal Government Holiday Calendar based on rules specified by:
https://www.opm.gov/policy-data-oversight/
snow-dismissal-procedures/federal-holidays/
"""
rules = [
Holiday('New Years Day', month=1, day=1, observance=nearest_workday),
USMartinLutherKingJr,
USPresidentsDay,
USMemorialDay,
Holiday('July 4th', month=7, day=4, observance=nearest_workday),
USLaborDay,
USColumbusDay,
Holiday('Veterans Day', month=11, day=11, observance=nearest_workday),
USThanksgivingDay,
Holiday('Christmas', month=12, day=25, observance=nearest_workday)
]
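# Example (assumed usage): list the US federal holidays observed in 2012.
#
#   cal = USFederalHolidayCalendar()
#   cal.holidays(start='2012-01-01', end='2012-12-31')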
def HolidayCalendarFactory(name, base, other,
base_class=AbstractHolidayCalendar):
rules = AbstractHolidayCalendar.merge_class(base, other)
calendar_class = type(name, (base_class,), {"rules": rules, "name": name})
return calendar_class
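# Example (assumed): build a calendar class that merges the federal rules with
# Good Friday; the base calendar's rules take precedence on name clashes.
#
#   ExampleCal = HolidayCalendarFactory('ExampleCal',
#                                       USFederalHolidayCalendar, GoodFriday)
#   ExampleCal().holidays('2019-01-01', '2019-12-31')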
| apache-2.0 |
eranroz/dnase | src/scripts/HMMTransitionEvaluation.py | 1 | 3453 | import os
import numpy as np
from config import RES_DIR
from data_provider import SeqLoader
from data_provider.DiscreteTransformer import DiscreteTransformer
from hmm import bwiter
from hmm.HMMModel import DiscreteHMM
__author__ = 'eranroz'
"""
This script evaluates different transition probabilities
for hmm model
Use it to estimate whether there are local maxima points the the bw iterations getting into.
"""
#resolution = 100
resolution = 100
iterations = 7
MODEL_EVALUATION_RESULTS = os.path.join(RES_DIR, 'modelEvaluation')
# transitions from open to closed
alphas = 0.1 ** np.arange(1, 6)
# transitions from closed to open
betas = 0.1 ** np.arange(1, 6)
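# In the state-transition matrix built below, alpha is P(open -> closed) and
# beta is P(closed -> open); rows are the begin, open and closed states.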
def calcMatrix():
    """Run Baum-Welch for each (alpha, beta) guess and record the likelihood."""
    res_file = open(os.path.join(MODEL_EVALUATION_RESULTS, 'modelTransitionsEvaluation.%s.txt' % str(resolution)), 'w')
print('Loading data')
training = SeqLoader.load_dict('UW.Fetal_Brain.ChromatinAccessibility.H-22510.DS11872', resolution,
DiscreteTransformer())
print('Creating model')
res_matrix = np.zeros((len(alphas), len(betas)))
for r_i, alpha in enumerate(alphas):
for c_i, beta in enumerate(betas):
print('alpha', alpha)
print('beta', beta)
state_transition = np.array(
[
[0.0, 0.99, 0.01], # begin
[0.3, 1 - alpha, alpha], # open (may go to close but prefers to keep the state)
[0.7, beta, 1 - beta] # closed (very small change to get to open)
]
)
emission = np.array([
np.zeros(4),
[0.02, 0.4, 0.5, 0.08], # open - prefers high values
[0.8, 0.1, 0.09, 0.01], # closed - prefers low values
])
model = DiscreteHMM(state_transition, emission)
res_file.write('\n-------------------\n')
res_file.write('Closed-> Open: %s\t,\t Open->Closed: %s\n' % (str(beta), str(alpha)))
res_file.write(str(model))
print('bw start')
new_model, p = bwiter.bw_iter(training['chr1'], model, iterations)
res_file.write('\nnew model\n')
res_file.write(str(new_model))
res_file.write('\np:%s' % str(p))
res_matrix[r_i, c_i] = p
res_file.write('Local maxima as function of different guess parameters')
res_file.write(str(res_matrix))
res_file.close()
    # numpy appends '.npy' to the given file name
    np.save(os.path.join(MODEL_EVALUATION_RESULTS, 'modelTransitionsEvaluationMatrix.%s' % str(resolution)), res_matrix)
def create_plot():
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection
res = np.array([
[-2181835.89867419, -2192408.7940744, -2200215.6704851, -2204726.1856845, -2207530.84739521],
[-2189644.41857069, -2199110.289325, -2203881.83793141, -2206813.13797173, -2208797.0438533],
[-2197568.64216762, -2203697.64678551, -2206737.7255108, -2208738.40633111, -2210177.4799934],
[-2202566.21615843, -2206645.72639486, -2208725.58699014, -2210171.86069338, -2211272.36491941],
[-2205750.18901925, -2208654.02659629, -2210165.36927431, -2211271.38763584, -2212139.36257863]
])
alphasA = np.log(alphas)
betasA = np.log(betas)
X, Y = np.meshgrid(alphasA, betasA)
zs = res
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, zs)
#fig.savefig('alphaBeta.png')
plt.show()
#create_plot()
calcMatrix()
| mit |
apple/llvm-project | clang/utils/analyzer/SATest.py | 7 | 15238 | #!/usr/bin/env python
import argparse
import sys
import os
from subprocess import call
SCRIPTS_DIR = os.path.dirname(os.path.realpath(__file__))
PROJECTS_DIR = os.path.join(SCRIPTS_DIR, "projects")
DEFAULT_LLVM_DIR = os.path.realpath(os.path.join(SCRIPTS_DIR,
os.path.pardir,
os.path.pardir,
os.path.pardir))
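# Example invocations (assumed typical usage of the subcommands defined below):
#
#   SATest.py build --projects openssl --jobs 4
#   SATest.py compare old_results new_results --show-stats
#   SATest.py docker --build-image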
def add(parser, args):
import SATestAdd
from ProjectMap import ProjectInfo
if args.source == "git" and (args.origin == "" or args.commit == ""):
parser.error(
"Please provide both --origin and --commit if source is 'git'")
if args.source != "git" and (args.origin != "" or args.commit != ""):
parser.error("Options --origin and --commit don't make sense when "
"source is not 'git'")
project = ProjectInfo(args.name[0], args.mode, args.source, args.origin,
args.commit)
SATestAdd.add_new_project(project)
def build(parser, args):
import SATestBuild
SATestBuild.VERBOSE = args.verbose
projects = get_projects(parser, args)
tester = SATestBuild.RegressionTester(args.jobs,
projects,
args.override_compiler,
args.extra_analyzer_config,
args.regenerate,
args.strictness)
tests_passed = tester.test_all()
if not tests_passed:
sys.stderr.write("ERROR: Tests failed.\n")
sys.exit(42)
def compare(parser, args):
import CmpRuns
choices = [CmpRuns.HistogramType.RELATIVE.value,
CmpRuns.HistogramType.LOG_RELATIVE.value,
CmpRuns.HistogramType.ABSOLUTE.value]
if args.histogram is not None and args.histogram not in choices:
parser.error("Incorrect histogram type, available choices are {}"
.format(choices))
dir_old = CmpRuns.ResultsDirectory(args.old[0], args.root_old)
dir_new = CmpRuns.ResultsDirectory(args.new[0], args.root_new)
CmpRuns.dump_scan_build_results_diff(dir_old, dir_new,
show_stats=args.show_stats,
stats_only=args.stats_only,
histogram=args.histogram,
verbose_log=args.verbose_log)
def update(parser, args):
import SATestUpdateDiffs
from ProjectMap import ProjectMap
project_map = ProjectMap()
for project in project_map.projects:
SATestUpdateDiffs.update_reference_results(project, args.git)
def benchmark(parser, args):
from SATestBenchmark import Benchmark
projects = get_projects(parser, args)
benchmark = Benchmark(projects, args.iterations, args.output)
benchmark.run()
def benchmark_compare(parser, args):
import SATestBenchmark
SATestBenchmark.compare(args.old, args.new, args.output)
def get_projects(parser, args):
from ProjectMap import ProjectMap, Size
project_map = ProjectMap()
projects = project_map.projects
def filter_projects(projects, predicate, force=False):
return [project.with_fields(enabled=(force or project.enabled) and
predicate(project))
for project in projects]
if args.projects:
projects_arg = args.projects.split(",")
available_projects = [project.name
for project in projects]
# validate that given projects are present in the project map file
for manual_project in projects_arg:
if manual_project not in available_projects:
parser.error("Project '{project}' is not found in "
"the project map file. Available projects are "
"{all}.".format(project=manual_project,
all=available_projects))
projects = filter_projects(projects, lambda project:
project.name in projects_arg,
force=True)
try:
max_size = Size.from_str(args.max_size)
except ValueError as e:
parser.error("{}".format(e))
projects = filter_projects(projects, lambda project:
project.size <= max_size)
return projects
def docker(parser, args):
if len(args.rest) > 0:
if args.rest[0] != "--":
parser.error("REST arguments should start with '--'")
args.rest = args.rest[1:]
if args.build_image:
docker_build_image()
elif args.shell:
docker_shell(args)
else:
sys.exit(docker_run(args, ' '.join(args.rest)))
def docker_build_image():
sys.exit(call("docker build --tag satest-image {}".format(SCRIPTS_DIR),
shell=True))
def docker_shell(args):
try:
# First we need to start the docker container in a waiting mode,
# so it doesn't do anything, but most importantly keeps working
# while the shell session is in progress.
docker_run(args, "--wait", "--detach")
# Since the docker container is running, we can actually connect to it
call("docker exec -it satest bash", shell=True)
except KeyboardInterrupt:
pass
finally:
docker_cleanup()
def docker_run(args, command, docker_args=""):
try:
return call("docker run --rm --name satest "
"-v {llvm}:/llvm-project "
"-v {build}:/build "
"-v {clang}:/analyzer "
"-v {scripts}:/scripts "
"-v {projects}:/projects "
"{docker_args} "
"satest-image:latest {command}"
.format(llvm=args.llvm_project_dir,
build=args.build_dir,
clang=args.clang_dir,
scripts=SCRIPTS_DIR,
projects=PROJECTS_DIR,
docker_args=docker_args,
command=command),
shell=True)
except KeyboardInterrupt:
docker_cleanup()
def docker_cleanup():
print("Please wait for docker to clean up")
call("docker stop satest", shell=True)
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
# add subcommand
add_parser = subparsers.add_parser(
"add",
help="Add a new project for the analyzer testing.")
# TODO: Add an option not to build.
# TODO: Set the path to the Repository directory.
add_parser.add_argument("name", nargs=1, help="Name of the new project")
add_parser.add_argument("--mode", action="store", default=1, type=int,
choices=[0, 1, 2],
help="Build mode: 0 for single file project, "
"1 for scan_build, "
"2 for single file c++11 project")
add_parser.add_argument("--source", action="store", default="script",
choices=["script", "git", "zip"],
help="Source type of the new project: "
"'git' for getting from git "
"(please provide --origin and --commit), "
"'zip' for unpacking source from a zip file, "
"'script' for downloading source by running "
"a custom script")
add_parser.add_argument("--origin", action="store", default="",
help="Origin link for a git repository")
add_parser.add_argument("--commit", action="store", default="",
help="Git hash for a commit to checkout")
add_parser.set_defaults(func=add)
# build subcommand
build_parser = subparsers.add_parser(
"build",
help="Build projects from the project map and compare results with "
"the reference.")
build_parser.add_argument("--strictness", dest="strictness",
type=int, default=0,
help="0 to fail on runtime errors, 1 to fail "
"when the number of found bugs are different "
"from the reference, 2 to fail on any "
"difference from the reference. Default is 0.")
build_parser.add_argument("-r", dest="regenerate", action="store_true",
default=False,
help="Regenerate reference output.")
build_parser.add_argument("--override-compiler", action="store_true",
default=False, help="Call scan-build with "
"--override-compiler option.")
build_parser.add_argument("-j", "--jobs", dest="jobs",
type=int, default=0,
help="Number of projects to test concurrently")
build_parser.add_argument("--extra-analyzer-config",
dest="extra_analyzer_config", type=str,
default="",
help="Arguments passed to to -analyzer-config")
build_parser.add_argument("--projects", action="store", default="",
help="Comma-separated list of projects to test")
build_parser.add_argument("--max-size", action="store", default=None,
help="Maximum size for the projects to test")
build_parser.add_argument("-v", "--verbose", action="count", default=0)
build_parser.set_defaults(func=build)
# compare subcommand
cmp_parser = subparsers.add_parser(
"compare",
help="Comparing two static analyzer runs in terms of "
"reported warnings and execution time statistics.")
cmp_parser.add_argument("--root-old", dest="root_old",
help="Prefix to ignore on source files for "
"OLD directory",
action="store", type=str, default="")
cmp_parser.add_argument("--root-new", dest="root_new",
help="Prefix to ignore on source files for "
"NEW directory",
action="store", type=str, default="")
cmp_parser.add_argument("--verbose-log", dest="verbose_log",
help="Write additional information to LOG "
"[default=None]",
action="store", type=str, default=None,
metavar="LOG")
cmp_parser.add_argument("--stats-only", action="store_true",
dest="stats_only", default=False,
help="Only show statistics on reports")
cmp_parser.add_argument("--show-stats", action="store_true",
dest="show_stats", default=False,
help="Show change in statistics")
cmp_parser.add_argument("--histogram", action="store", default=None,
help="Show histogram of paths differences. "
"Requires matplotlib")
cmp_parser.add_argument("old", nargs=1, help="Directory with old results")
cmp_parser.add_argument("new", nargs=1, help="Directory with new results")
cmp_parser.set_defaults(func=compare)
# update subcommand
upd_parser = subparsers.add_parser(
"update",
help="Update static analyzer reference results based on the previous "
"run of SATest build. Assumes that SATest build was just run.")
upd_parser.add_argument("--git", action="store_true",
help="Stage updated results using git.")
upd_parser.set_defaults(func=update)
# docker subcommand
dock_parser = subparsers.add_parser(
"docker",
help="Run regression system in the docker.")
dock_parser.add_argument("--build-image", action="store_true",
help="Build docker image for running tests.")
dock_parser.add_argument("--shell", action="store_true",
help="Start a shell on docker.")
dock_parser.add_argument("--llvm-project-dir", action="store",
default=DEFAULT_LLVM_DIR,
help="Path to LLVM source code. Defaults "
"to the repo where this script is located. ")
dock_parser.add_argument("--build-dir", action="store", default="",
help="Path to a directory where docker should "
"build LLVM code.")
dock_parser.add_argument("--clang-dir", action="store", default="",
help="Path to find/install LLVM installation.")
dock_parser.add_argument("rest", nargs=argparse.REMAINDER, default=[],
help="Additionall args that will be forwarded "
"to the docker's entrypoint.")
dock_parser.set_defaults(func=docker)
# benchmark subcommand
bench_parser = subparsers.add_parser(
"benchmark",
help="Run benchmarks by building a set of projects multiple times.")
bench_parser.add_argument("-i", "--iterations", action="store",
type=int, default=20,
help="Number of iterations for building each "
"project.")
bench_parser.add_argument("-o", "--output", action="store",
default="benchmark.csv",
help="Output csv file for the benchmark results")
bench_parser.add_argument("--projects", action="store", default="",
help="Comma-separated list of projects to test")
bench_parser.add_argument("--max-size", action="store", default=None,
help="Maximum size for the projects to test")
bench_parser.set_defaults(func=benchmark)
bench_subparsers = bench_parser.add_subparsers()
bench_compare_parser = bench_subparsers.add_parser(
"compare",
help="Compare benchmark runs.")
bench_compare_parser.add_argument("--old", action="store", required=True,
help="Benchmark reference results to "
"compare agains.")
bench_compare_parser.add_argument("--new", action="store", required=True,
help="New benchmark results to check.")
bench_compare_parser.add_argument("-o", "--output",
action="store", required=True,
help="Output file for plots.")
bench_compare_parser.set_defaults(func=benchmark_compare)
args = parser.parse_args()
args.func(parser, args)
if __name__ == "__main__":
main()
| apache-2.0 |
moutai/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 58 | 1803 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
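# shrink_threshold soft-thresholds each centroid's deviation from the overall
# data centroid, so uninformative features stop contributing to classification
# (the "nearest shrunken centroid" method); None disables shrinking.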
for shrinkage in [None, .2]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |