repo_name | path | copies | size | content | license
---|---|---|---|---|---|
0x0all/scikit-learn | examples/cluster/plot_kmeans_digits.py | 53 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max] x [y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
          'Centroids are marked with a white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
kevin-intel/scikit-learn | sklearn/utils/_testing.py | 2 | 33524 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# Giorgio Patrini
# Thierry Guillemot
# License: BSD 3 clause
import os
import os.path as op
import inspect
import warnings
import sys
import functools
import tempfile
from subprocess import check_output, STDOUT, CalledProcessError
from subprocess import TimeoutExpired
import re
import contextlib
from collections.abc import Iterable
import scipy as sp
from functools import wraps
from inspect import signature
import shutil
import atexit
import unittest
from unittest import TestCase
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
from numpy.testing import assert_allclose
from numpy.testing import assert_almost_equal
from numpy.testing import assert_approx_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
import joblib
import sklearn
from sklearn.utils import IS_PYPY, _IS_32BIT
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import (
check_array,
check_is_fitted,
check_X_y,
)
__all__ = ["assert_raises",
"assert_raises_regexp",
"assert_array_equal",
"assert_almost_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_approx_equal", "assert_allclose",
"assert_run_python_script", "SkipTest"]
_dummy = TestCase('__init__')
assert_raises = _dummy.assertRaises
SkipTest = unittest.case.SkipTest
assert_dict_equal = _dummy.assertDictEqual
assert_raises_regex = _dummy.assertRaisesRegex
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
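    Examples
    --------
    Illustrative usage; ``deprecated_func`` below is a hypothetical helper.
    >>> def deprecated_func():
    ...     warnings.warn("deprecated", DeprecationWarning)
    ...     return 42
    >>> assert_warns(DeprecationWarning, deprecated_func)
    42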
"""
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'FutureWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str or callable
The message or a substring of the message to test for. If callable,
it takes a string as the argument and will trigger an AssertionError
if the callable returns `False`.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
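    Examples
    --------
    Illustrative usage; ``noisy`` below is a hypothetical helper.
    >>> def noisy():
    ...     warnings.warn("this will be removed", FutureWarning)
    ...     return 1
    >>> assert_warns_message(FutureWarning, "will be removed", noisy)
    1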
"""
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'FutureWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
        # Check that the messages of all warnings raised belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
def check_in_message(msg): return message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
"""
Parameters
----------
func
*args
**kw
"""
# very important to avoid uncontrolled state propagation
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'FutureWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: [%s]"
% (func.__name__,
', '.join(str(warning) for warning in w)))
return result
def ignore_warnings(obj=None, category=Warning):
"""Context manager and decorator to ignore warnings.
Note: Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging, this is not your tool of choice.
Parameters
----------
obj : callable, default=None
callable where you want to ignore the warnings.
category : warning class, default=Warning
The category to filter. If Warning, all categories will be muted.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if isinstance(obj, type) and issubclass(obj, Warning):
# Avoid common pitfall of passing category as the first positional
# argument which result in the test not being run
warning_name = obj.__name__
raise ValueError(
"'obj' should be a callable where you want to ignore warnings. "
"You passed a warning class instead: 'obj={warning_name}'. "
"If you want to pass a warning class to ignore_warnings, "
"you should use 'category={warning_name}'".format(
warning_name=warning_name))
elif callable(obj):
return _IgnoreWarnings(category=category)(obj)
else:
return _IgnoreWarnings(category=category)
class _IgnoreWarnings:
"""Improved and simplified Python warnings context manager and decorator.
This class allows the user to ignore the warnings raised by a function.
Copied from Python 2.7.5 and modified as required.
Parameters
----------
category : tuple of warning class, default=Warning
The category to filter. By default, all the categories will be muted.
"""
def __init__(self, category):
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
self.category = category
def __call__(self, fn):
"""Decorator to catch and hide warnings without visual nesting."""
@wraps(fn)
def wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore", self.category)
return fn(*args, **kwargs)
return wrapper
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
warnings.simplefilter("ignore", self.category)
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test the message raised in an exception.
Given an exception, a callable to raise the exception, and
a message string, tests that the correct exception is raised and
that the message is a substring of the error thrown. Used to test
that the specific message thrown during an exception is correct.
Parameters
----------
exceptions : exception or tuple of exception
An Exception object.
message : str
The error message or a substring of the error message.
function : callable
Callable object to raise error.
*args : the positional arguments to `function`.
**kwargs : the keyword arguments to `function`.
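    Examples
    --------
    Illustrative usage; ``bad_call`` below is a hypothetical helper.
    >>> def bad_call():
    ...     raise ValueError("Invalid parameter value")
    >>> assert_raise_message(ValueError, "Invalid parameter", bad_call)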
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
def assert_allclose_dense_sparse(x, y, rtol=1e-07, atol=1e-9, err_msg=''):
"""Assert allclose for sparse and dense data.
Both x and y need to be either sparse or dense, they
can't be mixed.
Parameters
----------
x : {array-like, sparse matrix}
First array to compare.
y : {array-like, sparse matrix}
Second array to compare.
rtol : float, default=1e-07
relative tolerance; see numpy.allclose.
atol : float, default=1e-9
absolute tolerance; see numpy.allclose. Note that the default here is
more tolerant than the default for numpy.testing.assert_allclose, where
atol=0.
err_msg : str, default=''
Error message to raise.
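    Examples
    --------
    A minimal sanity check (illustrative only).
    >>> import numpy as np
    >>> from scipy import sparse
    >>> x = sparse.csr_matrix(np.eye(2))
    >>> assert_allclose_dense_sparse(x, x.copy())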
"""
if sp.sparse.issparse(x) and sp.sparse.issparse(y):
x = x.tocsr()
y = y.tocsr()
x.sum_duplicates()
y.sum_duplicates()
assert_array_equal(x.indices, y.indices, err_msg=err_msg)
assert_array_equal(x.indptr, y.indptr, err_msg=err_msg)
assert_allclose(x.data, y.data, rtol=rtol, atol=atol, err_msg=err_msg)
elif not sp.sparse.issparse(x) and not sp.sparse.issparse(y):
# both dense
assert_allclose(x, y, rtol=rtol, atol=atol, err_msg=err_msg)
else:
raise ValueError("Can only compare two sparse matrices,"
" not a sparse matrix and an array.")
def set_random_state(estimator, random_state=0):
"""Set random state of an estimator if it has the `random_state` param.
Parameters
----------
estimator : object
The estimator.
random_state : int, RandomState instance or None, default=0
Pseudo random number generator state.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
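    Examples
    --------
    Illustrative usage with a scikit-learn estimator.
    >>> from sklearn.cluster import KMeans
    >>> est = KMeans()
    >>> set_random_state(est, random_state=0)
    >>> est.get_params()["random_state"]
    0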
"""
if "random_state" in estimator.get_params():
estimator.set_params(random_state=random_state)
try:
import pytest
skip_if_32bit = pytest.mark.skipif(_IS_32BIT,
reason='skipped on 32bit platforms')
skip_travis = pytest.mark.skipif(os.environ.get('TRAVIS') == 'true',
reason='skip on travis')
fails_if_pypy = pytest.mark.xfail(IS_PYPY,
reason='not compatible with PyPy')
skip_if_no_parallel = pytest.mark.skipif(not joblib.parallel.mp,
reason="joblib is in serial mode")
# Decorator for tests involving both BLAS calls and multiprocessing.
#
# Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction
# with some implementation of BLAS (or other libraries that manage an
# internal posix thread pool) can cause a crash or a freeze of the Python
# process.
#
    # In practice all known packaged distributions (from Linux distros or
    # Anaconda) of BLAS under Linux seem to be safe, so this problem seems
    # to only impact OSX users.
    #
    # This wrapper makes it possible to skip tests that can possibly cause
    # this crash under OS X.
    #
    # Under Python 3.4+ it is possible to use the `forkserver` start method
    # for multiprocessing to avoid this issue. However, it can cause pickling
    # errors on interactively defined functions. It is therefore not enabled
    # by default.
if_safe_multiprocessing_with_blas = pytest.mark.skipif(
sys.platform == 'darwin',
reason="Possible multi-process bug with some BLAS")
except ImportError:
pass
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independence).
"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap:
"""
Parameters
----------
data
mmap_mode : str, default='r'
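    Examples
    --------
    Illustrative usage as a context manager; the yielded array is a
    read-only memmap of ``data``.
    >>> import numpy as np
    >>> with TempMemmap(np.ones(3)) as data_read_only:
    ...     float(data_read_only.sum())
    3.0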
"""
def __init__(self, data, mmap_mode='r'):
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
data_read_only, self.temp_folder = create_memmap_backed_data(
self.data, mmap_mode=self.mmap_mode, return_folder=True)
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
def create_memmap_backed_data(data, mmap_mode='r', return_folder=False):
"""
Parameters
----------
data
mmap_mode : str, default='r'
return_folder : bool, default=False
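    Examples
    --------
    Illustrative round-trip; the returned object is a read-only,
    memory-mapped copy of ``data``.
    >>> import numpy as np
    >>> data = np.arange(4.0)
    >>> memmapped = create_memmap_backed_data(data)
    >>> bool(np.array_equal(memmapped, data))
    True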
"""
temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
atexit.register(functools.partial(_delete_folder, temp_folder, warn=True))
filename = op.join(temp_folder, 'data.pkl')
joblib.dump(data, filename)
memmap_backed_data = joblib.load(filename, mmap_mode=mmap_mode)
result = (memmap_backed_data if not return_folder
else (memmap_backed_data, temp_folder))
return result
# Utils to test docstrings
def _get_args(function, varargs=False):
"""Helper to get function arguments."""
try:
params = signature(function).parameters
except ValueError:
# Error on builtin C function
return []
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
def _get_func_name(func):
"""Get function full name.
Parameters
----------
func : callable
The function object.
Returns
-------
name : str
The function name.
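    Examples
    --------
    Illustrative output for a function defined in this module.
    >>> _get_func_name(set_random_state)
    'sklearn.utils._testing.set_random_state'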
"""
parts = []
module = inspect.getmodule(func)
if module:
parts.append(module.__name__)
qualname = func.__qualname__
if qualname != func.__name__:
parts.append(qualname[:qualname.find('.')])
parts.append(func.__name__)
return '.'.join(parts)
def check_docstring_parameters(func, doc=None, ignore=None):
"""Helper to check docstring.
Parameters
----------
func : callable
The function object to test.
doc : str, default=None
Docstring if it is passed manually to the test.
ignore : list, default=None
Parameters to ignore.
Returns
-------
incorrect : list
A list of string describing the incorrect results.
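    Examples
    --------
    Illustrative usage; skipped in doctests because it requires the
    optional ``numpydoc`` dependency.
    >>> from sklearn.utils.validation import check_array
    >>> check_docstring_parameters(check_array)  # doctest: +SKIP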
"""
from numpydoc import docscrape
incorrect = []
ignore = [] if ignore is None else ignore
func_name = _get_func_name(func)
if (not func_name.startswith('sklearn.') or
func_name.startswith('sklearn.externals')):
return incorrect
# Don't check docstring for property-functions
if inspect.isdatadescriptor(func):
return incorrect
# Don't check docstring for setup / teardown pytest functions
if func_name.split('.')[-1] in ('setup_module', 'teardown_module'):
return incorrect
    # Don't check the estimator_checks module
if func_name.split('.')[2] == 'estimator_checks':
return incorrect
# Get the arguments from the function signature
param_signature = list(filter(lambda x: x not in ignore, _get_args(func)))
# drop self
if len(param_signature) > 0 and param_signature[0] == 'self':
param_signature.remove('self')
# Analyze function's docstring
if doc is None:
with warnings.catch_warnings(record=True) as w:
try:
doc = docscrape.FunctionDoc(func)
except Exception as exp:
incorrect += [func_name + ' parsing error: ' + str(exp)]
return incorrect
if len(w):
raise RuntimeError('Error for %s:\n%s' % (func_name, w[0]))
param_docs = []
for name, type_definition, param_doc in doc['Parameters']:
# Type hints are empty only if parameter name ended with :
if not type_definition.strip():
if ':' in name and name[:name.index(':')][-1:].strip():
incorrect += [func_name +
' There was no space between the param name and '
'colon (%r)' % name]
elif name.rstrip().endswith(':'):
incorrect += [func_name +
' Parameter %r has an empty type spec. '
'Remove the colon' % (name.lstrip())]
# Create a list of parameters to compare with the parameters gotten
# from the func signature
if '*' not in name:
param_docs.append(name.split(':')[0].strip('` '))
# If one of the docstring's parameters had an error then return that
# incorrect message
if len(incorrect) > 0:
return incorrect
# Remove the parameters that should be ignored from list
param_docs = list(filter(lambda x: x not in ignore, param_docs))
# The following is derived from pytest, Copyright (c) 2004-2017 Holger
# Krekel and others, Licensed under MIT License. See
# https://github.com/pytest-dev/pytest
message = []
for i in range(min(len(param_docs), len(param_signature))):
if param_signature[i] != param_docs[i]:
message += ["There's a parameter name mismatch in function"
" docstring w.r.t. function signature, at index %s"
" diff: %r != %r" %
(i, param_signature[i], param_docs[i])]
break
if len(param_signature) > len(param_docs):
message += ["Parameters in function docstring have less items w.r.t."
" function signature, first missing item: %s" %
param_signature[len(param_docs)]]
elif len(param_signature) < len(param_docs):
message += ["Parameters in function docstring have more items w.r.t."
" function signature, first extra item: %s" %
param_docs[len(param_signature)]]
# If there wasn't any difference in the parameters themselves between
# docstring and signature including having the same length then return
# empty list
if len(message) == 0:
return []
import difflib
import pprint
param_docs_formatted = pprint.pformat(param_docs).splitlines()
param_signature_formatted = pprint.pformat(param_signature).splitlines()
message += ["Full diff:"]
message.extend(
line.strip() for line in difflib.ndiff(param_signature_formatted,
param_docs_formatted)
)
incorrect.extend(message)
# Prepend function name
incorrect = ['In function: ' + func_name] + incorrect
return incorrect
def assert_run_python_script(source_code, timeout=60):
"""Utility to check assertions in an independent Python subprocess.
The script provided in the source code should return 0 and not print
anything on stderr or stdout.
This is a port from cloudpickle https://github.com/cloudpipe/cloudpickle
Parameters
----------
source_code : str
The Python source code to execute.
timeout : int, default=60
Time in seconds before timeout.
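    Examples
    --------
    Illustrative usage; skipped in doctests because it spawns a subprocess.
    >>> assert_run_python_script("import sklearn", timeout=60)  # doctest: +SKIP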
"""
fd, source_file = tempfile.mkstemp(suffix='_src_test_sklearn.py')
os.close(fd)
try:
with open(source_file, 'wb') as f:
f.write(source_code.encode('utf-8'))
cmd = [sys.executable, source_file]
cwd = op.normpath(op.join(op.dirname(sklearn.__file__), '..'))
env = os.environ.copy()
try:
env["PYTHONPATH"] = os.pathsep.join([cwd, env["PYTHONPATH"]])
except KeyError:
env["PYTHONPATH"] = cwd
kwargs = {
'cwd': cwd,
'stderr': STDOUT,
'env': env
}
# If coverage is running, pass the config file to the subprocess
coverage_rc = os.environ.get("COVERAGE_PROCESS_START")
if coverage_rc:
kwargs['env']['COVERAGE_PROCESS_START'] = coverage_rc
kwargs['timeout'] = timeout
try:
try:
out = check_output(cmd, **kwargs)
except CalledProcessError as e:
raise RuntimeError(u"script errored with output:\n%s"
% e.output.decode('utf-8'))
if out != b"":
raise AssertionError(out.decode('utf-8'))
except TimeoutExpired as e:
raise RuntimeError(u"script timeout, output so far:\n%s"
% e.output.decode('utf-8'))
finally:
os.unlink(source_file)
def _convert_container(
container, constructor_name, columns_name=None, dtype=None
):
"""Convert a given container to a specific array-like with a dtype.
Parameters
----------
container : array-like
The container to convert.
constructor_name : {"list", "tuple", "array", "sparse", "dataframe", \
"series", "index", "slice", "sparse_csr", "sparse_csc"}
The type of the returned container.
columns_name : index or array-like, default=None
For pandas container supporting `columns_names`, it will affect
specific names.
dtype : dtype, default=None
Force the dtype of the container. Does not apply to `"slice"`
container.
Returns
-------
converted_container
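    Examples
    --------
    Minimal illustrative conversions.
    >>> _convert_container([1, 2], "tuple")
    (1, 2)
    >>> _convert_container([1, 2], "slice")
    slice(1, 2, None)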
"""
if constructor_name == 'list':
if dtype is None:
return list(container)
else:
return np.asarray(container, dtype=dtype).tolist()
elif constructor_name == 'tuple':
if dtype is None:
return tuple(container)
else:
return tuple(np.asarray(container, dtype=dtype).tolist())
elif constructor_name == 'array':
return np.asarray(container, dtype=dtype)
elif constructor_name == 'sparse':
return sp.sparse.csr_matrix(container, dtype=dtype)
elif constructor_name == 'dataframe':
pd = pytest.importorskip('pandas')
return pd.DataFrame(container, columns=columns_name, dtype=dtype)
elif constructor_name == 'series':
pd = pytest.importorskip('pandas')
return pd.Series(container, dtype=dtype)
elif constructor_name == 'index':
pd = pytest.importorskip('pandas')
return pd.Index(container, dtype=dtype)
elif constructor_name == 'slice':
return slice(container[0], container[1])
elif constructor_name == 'sparse_csr':
return sp.sparse.csr_matrix(container, dtype=dtype)
elif constructor_name == 'sparse_csc':
return sp.sparse.csc_matrix(container, dtype=dtype)
def raises(expected_exc_type, match=None, may_pass=False, err_msg=None):
"""Context manager to ensure exceptions are raised within a code block.
This is similar to and inspired from pytest.raises, but supports a few
other cases.
This is only intended to be used in estimator_checks.py where we don't
want to use pytest. In the rest of the code base, just use pytest.raises
instead.
Parameters
----------
    expected_exc_type : Exception or list of Exception
The exception that should be raised by the block. If a list, the block
should raise one of the exceptions.
match : str or list of str, default=None
A regex that the exception message should match. If a list, one of
the entries must match. If None, match isn't enforced.
may_pass : bool, default=False
If True, the block is allowed to not raise an exception. Useful in
cases where some estimators may support a feature but others must
fail with an appropriate error message. By default, the context
manager will raise an exception if the block does not raise an
exception.
err_msg : str, default=None
If the context manager fails (e.g. the block fails to raise the
proper exception, or fails to match), then an AssertionError is
raised with this message. By default, an AssertionError is raised
with a default error message (depends on the kind of failure). Use
this to indicate how users should fix their estimators to pass the
checks.
Attributes
----------
raised_and_matched : bool
True if an exception was raised and a match was found, False otherwise.
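    Examples
    --------
    Minimal illustrative usage.
    >>> with raises(ValueError, match="invalid"):
    ...     raise ValueError("invalid parameter")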
"""
return _Raises(expected_exc_type, match, may_pass, err_msg)
class _Raises(contextlib.AbstractContextManager):
# see raises() for parameters
def __init__(self, expected_exc_type, match, may_pass, err_msg):
self.expected_exc_types = (
expected_exc_type
if isinstance(expected_exc_type, Iterable)
else [expected_exc_type]
)
self.matches = [match] if isinstance(match, str) else match
self.may_pass = may_pass
self.err_msg = err_msg
self.raised_and_matched = False
def __exit__(self, exc_type, exc_value, _):
# see
# https://docs.python.org/2.5/whatsnew/pep-343.html#SECTION000910000000000000000
if exc_type is None: # No exception was raised in the block
if self.may_pass:
return True # CM is happy
else:
err_msg = (
self.err_msg or f"Did not raise: {self.expected_exc_types}"
)
raise AssertionError(err_msg)
if not any(
issubclass(exc_type, expected_type)
for expected_type in self.expected_exc_types
):
if self.err_msg is not None:
raise AssertionError(self.err_msg) from exc_value
else:
return False # will re-raise the original exception
if self.matches is not None:
err_msg = self.err_msg or (
"The error message should contain one of the following "
"patterns:\n{}\nGot {}".format(
"\n".join(self.matches), str(exc_value)
)
)
if not any(re.search(match, str(exc_value))
for match in self.matches):
raise AssertionError(err_msg) from exc_value
self.raised_and_matched = True
return True
class MinimalClassifier:
"""Minimal classifier implementation with inheriting from BaseEstimator.
This estimator should be tested with:
* `check_estimator` in `test_estimator_checks.py`;
* within a `Pipeline` in `test_pipeline.py`;
* within a `SearchCV` in `test_search.py`.
"""
_estimator_type = "classifier"
def __init__(self, param=None):
self.param = param
def get_params(self, deep=True):
return {"param": self.param}
def set_params(self, **params):
for key, value in params.items():
setattr(self, key, value)
return self
def fit(self, X, y):
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, counts = np.unique(y, return_counts=True)
self._most_frequent_class_idx = counts.argmax()
return self
def predict_proba(self, X):
check_is_fitted(self)
X = check_array(X)
proba_shape = (X.shape[0], self.classes_.size)
y_proba = np.zeros(shape=proba_shape, dtype=np.float64)
y_proba[:, self._most_frequent_class_idx] = 1.0
return y_proba
def predict(self, X):
y_proba = self.predict_proba(X)
y_pred = y_proba.argmax(axis=1)
return self.classes_[y_pred]
def score(self, X, y):
from sklearn.metrics import accuracy_score
return accuracy_score(y, self.predict(X))
class MinimalRegressor:
"""Minimal regressor implementation with inheriting from BaseEstimator.
This estimator should be tested with:
* `check_estimator` in `test_estimator_checks.py`;
* within a `Pipeline` in `test_pipeline.py`;
* within a `SearchCV` in `test_search.py`.
"""
_estimator_type = "regressor"
def __init__(self, param=None):
self.param = param
def get_params(self, deep=True):
return {"param": self.param}
def set_params(self, **params):
for key, value in params.items():
setattr(self, key, value)
return self
def fit(self, X, y):
X, y = check_X_y(X, y)
self.is_fitted_ = True
self._mean = np.mean(y)
return self
def predict(self, X):
check_is_fitted(self)
X = check_array(X)
return np.ones(shape=(X.shape[0],)) * self._mean
def score(self, X, y):
from sklearn.metrics import r2_score
return r2_score(y, self.predict(X))
class MinimalTransformer:
"""Minimal transformer implementation with inheriting from
BaseEstimator.
This estimator should be tested with:
* `check_estimator` in `test_estimator_checks.py`;
* within a `Pipeline` in `test_pipeline.py`;
* within a `SearchCV` in `test_search.py`.
"""
def __init__(self, param=None):
self.param = param
def get_params(self, deep=True):
return {"param": self.param}
def set_params(self, **params):
for key, value in params.items():
setattr(self, key, value)
return self
def fit(self, X, y=None):
X = check_array(X)
self.is_fitted_ = True
return self
def transform(self, X, y=None):
check_is_fitted(self)
X = check_array(X)
return X
def fit_transform(self, X, y=None):
return self.fit(X, y).transform(X, y)
| bsd-3-clause |
louispotok/pandas | pandas/io/json/table_schema.py | 6 | 10161 | """
Table Schema builders
http://specs.frictionlessdata.io/json-table-schema/
"""
import warnings
import pandas._libs.json as json
from pandas import DataFrame
from pandas.api.types import CategoricalDtype
import pandas.core.common as com
from pandas.core.dtypes.common import (
is_integer_dtype, is_timedelta64_dtype, is_numeric_dtype,
is_bool_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
is_categorical_dtype, is_period_dtype, is_string_dtype
)
loads = json.loads
def as_json_table_type(x):
"""
Convert a NumPy / pandas type to its corresponding json_table.
Parameters
----------
x : array or dtype
Returns
-------
t : str
the Table Schema data types
Notes
-----
This table shows the relationship between NumPy / pandas dtypes,
and Table Schema dtypes.
    ===============  =================
    Pandas type      Table Schema type
    ===============  =================
    int64            integer
    float64          number
    bool             boolean
    datetime64[ns]   datetime
    timedelta64[ns]  duration
    object           string
    categorical      any
    ===============  =================
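    Examples
    --------
    Illustrative conversions.
    >>> import numpy as np
    >>> as_json_table_type(np.dtype('int64'))
    'integer'
    >>> as_json_table_type(np.dtype('datetime64[ns]'))
    'datetime'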
"""
if is_integer_dtype(x):
return 'integer'
elif is_bool_dtype(x):
return 'boolean'
elif is_numeric_dtype(x):
return 'number'
elif (is_datetime64_dtype(x) or is_datetime64tz_dtype(x) or
is_period_dtype(x)):
return 'datetime'
elif is_timedelta64_dtype(x):
return 'duration'
elif is_categorical_dtype(x):
return 'any'
elif is_string_dtype(x):
return 'string'
else:
return 'any'
def set_default_names(data):
"""Sets index names to 'index' for regular, or 'level_x' for Multi"""
if com._all_not_none(*data.index.names):
nms = data.index.names
if len(nms) == 1 and data.index.name == 'index':
warnings.warn("Index name of 'index' is not round-trippable")
elif len(nms) > 1 and any(x.startswith('level_') for x in nms):
warnings.warn("Index names beginning with 'level_' are not "
"round-trippable")
return data
data = data.copy()
if data.index.nlevels > 1:
names = [name if name is not None else 'level_{}'.format(i)
for i, name in enumerate(data.index.names)]
data.index.names = names
else:
data.index.name = data.index.name or 'index'
return data
def convert_pandas_type_to_json_field(arr, dtype=None):
dtype = dtype or arr.dtype
if arr.name is None:
name = 'values'
else:
name = arr.name
field = {'name': name,
'type': as_json_table_type(dtype)}
if is_categorical_dtype(arr):
if hasattr(arr, 'categories'):
cats = arr.categories
ordered = arr.ordered
else:
cats = arr.cat.categories
ordered = arr.cat.ordered
field['constraints'] = {"enum": list(cats)}
field['ordered'] = ordered
elif is_period_dtype(arr):
field['freq'] = arr.freqstr
elif is_datetime64tz_dtype(arr):
if hasattr(arr, 'dt'):
field['tz'] = arr.dt.tz.zone
else:
field['tz'] = arr.tz.zone
return field
def convert_json_field_to_pandas_type(field):
"""
Converts a JSON field descriptor into its corresponding NumPy / pandas type
Parameters
----------
field
A JSON field descriptor
Returns
-------
dtype
Raises
    ------
ValueError
If the type of the provided field is unknown or currently unsupported
Examples
--------
>>> convert_json_field_to_pandas_type({'name': 'an_int',
'type': 'integer'})
'int64'
>>> convert_json_field_to_pandas_type({'name': 'a_categorical',
'type': 'any',
'contraints': {'enum': [
'a', 'b', 'c']},
'ordered': True})
'CategoricalDtype(categories=['a', 'b', 'c'], ordered=True)'
>>> convert_json_field_to_pandas_type({'name': 'a_datetime',
'type': 'datetime'})
'datetime64[ns]'
>>> convert_json_field_to_pandas_type({'name': 'a_datetime_with_tz',
'type': 'datetime',
'tz': 'US/Central'})
'datetime64[ns, US/Central]'
"""
typ = field['type']
if typ == 'string':
return 'object'
elif typ == 'integer':
return 'int64'
elif typ == 'number':
return 'float64'
elif typ == 'boolean':
return 'bool'
elif typ == 'duration':
return 'timedelta64'
elif typ == 'datetime':
if field.get('tz'):
return 'datetime64[ns, {tz}]'.format(tz=field['tz'])
else:
return 'datetime64[ns]'
elif typ == 'any':
if 'constraints' in field and 'ordered' in field:
return CategoricalDtype(categories=field['constraints']['enum'],
ordered=field['ordered'])
else:
return 'object'
raise ValueError("Unsupported or invalid field type: {}".format(typ))
def build_table_schema(data, index=True, primary_key=None, version=True):
"""
Create a Table schema from ``data``.
Parameters
----------
data : Series, DataFrame
index : bool, default True
Whether to include ``data.index`` in the schema.
    primary_key : bool or None, default None
        Column names to designate as the primary key.
        The default `None` will set `'primaryKey'` to the index
        level or levels if the index is unique.
version : bool, default True
Whether to include a field `pandas_version` with the version
of pandas that generated the schema.
Returns
-------
schema : dict
Examples
--------
>>> df = pd.DataFrame(
... {'A': [1, 2, 3],
... 'B': ['a', 'b', 'c'],
... 'C': pd.date_range('2016-01-01', freq='d', periods=3),
... }, index=pd.Index(range(3), name='idx'))
>>> build_table_schema(df)
{'fields': [{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'}],
'pandas_version': '0.20.0',
'primaryKey': ['idx']}
Notes
-----
    See `as_json_table_type` for conversion types.
    Timedeltas are converted to ISO8601 duration format with
9 decimal places after the seconds field for nanosecond precision.
Categoricals are converted to the `any` dtype, and use the `enum` field
constraint to list the allowed values. The `ordered` attribute is included
in an `ordered` field.
"""
if index is True:
data = set_default_names(data)
schema = {}
fields = []
if index:
if data.index.nlevels > 1:
for level in data.index.levels:
fields.append(convert_pandas_type_to_json_field(level))
else:
fields.append(convert_pandas_type_to_json_field(data.index))
if data.ndim > 1:
for column, s in data.iteritems():
fields.append(convert_pandas_type_to_json_field(s))
else:
fields.append(convert_pandas_type_to_json_field(data))
schema['fields'] = fields
if index and data.index.is_unique and primary_key is None:
if data.index.nlevels == 1:
schema['primaryKey'] = [data.index.name]
else:
schema['primaryKey'] = data.index.names
elif primary_key is not None:
schema['primaryKey'] = primary_key
if version:
schema['pandas_version'] = '0.20.0'
return schema
def parse_table_schema(json, precise_float):
"""
Builds a DataFrame from a given schema
Parameters
----------
    json : str
        A JSON table schema
precise_float : boolean
Flag controlling precision when decoding string to double values, as
dictated by ``read_json``
Returns
-------
df : DataFrame
Raises
------
NotImplementedError
If the JSON table schema contains either timezone or timedelta data
Notes
-----
Because :func:`DataFrame.to_json` uses the string 'index' to denote a
name-less :class:`Index`, this function sets the name of the returned
:class:`DataFrame` to ``None`` when said string is encountered with a
normal :class:`Index`. For a :class:`MultiIndex`, the same limitation
applies to any strings beginning with 'level_'. Therefore, an
:class:`Index` name of 'index' and :class:`MultiIndex` names starting
with 'level_' are not supported.
See also
--------
build_table_schema : inverse function
pandas.read_json
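    Examples
    --------
    Illustrative round-trip through ``DataFrame.to_json``; skipped in
    doctests because it exercises the full pandas JSON machinery.
    >>> from pandas import DataFrame, Index
    >>> df = DataFrame({'A': [1, 2]}, index=Index([0, 1], name='idx'))
    >>> parse_table_schema(df.to_json(orient='table'),
    ...                    precise_float=False)  # doctest: +SKIP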
"""
table = loads(json, precise_float=precise_float)
col_order = [field['name'] for field in table['schema']['fields']]
df = DataFrame(table['data'], columns=col_order)[col_order]
dtypes = {field['name']: convert_json_field_to_pandas_type(field)
for field in table['schema']['fields']}
# Cannot directly use as_type with timezone data on object; raise for now
if any(str(x).startswith('datetime64[ns, ') for x in dtypes.values()):
raise NotImplementedError('table="orient" can not yet read timezone '
'data')
# No ISO constructor for Timedelta as of yet, so need to raise
if 'timedelta64' in dtypes.values():
raise NotImplementedError('table="orient" can not yet read '
'ISO-formatted Timedelta data')
df = df.astype(dtypes)
df = df.set_index(table['schema']['primaryKey'])
if len(df.index.names) == 1:
if df.index.name == 'index':
df.index.name = None
else:
df.index.names = [None if x.startswith('level_') else x for x in
df.index.names]
return df
| bsd-3-clause |
thomasgibson/tabula-rasa | SWE/swe_williamson5.py | 1 | 14187 | """
This script runs a nonlinear shallow water system describing
a simplified atmospheric model on an Earth-sized sphere mesh.
This model problem is designed from the Williamson test case
suite (1992), specifically the mountain test case (case 5).
A simple DG-advection scheme is used for the advection of the
depth-field, and an upwinded-DG method is used for velocity.
The nonlinear system is solved using a Picard method for computing
solution updates (currently set to 4 iterations). The implicit
midpoint rule is employed for time-integration.
The resulting implicit linear system for the updates in each
Picard iteration is solved using either a preconditioned Schur
complement method with GMRES, or a single application of a
Firedrake preconditioner using a mixed-hybrid method. The purpose
of this script is to compare both approaches via profiling and
computing the reductions in the problem residual for the implicit
linear system.
"""
from firedrake.petsc import PETSc
from firedrake import COMM_WORLD, parameters
from argparse import ArgumentParser
from pyop2.profiling import timed_stage
from mpi4py import MPI
import pandas as pd
import sys
import os
import solver as module
parameters["pyop2_options"]["lazy_evaluation"] = False
parser = ArgumentParser(description="""Run Williamson test case 5""",
add_help=False)
parser.add_argument("--hybridization",
action="store_true",
help="Turn hybridization on.")
parser.add_argument("--dt",
action="store",
type=float,
default=1000.0,
help="The time-step size.")
parser.add_argument("--mesh_degree",
action="store",
type=int,
default=1,
help="Degree of the mesh")
parser.add_argument("--model_degree",
action="store",
type=int,
default=2,
help="Degree of the finite element model.")
parser.add_argument("--method",
action="store",
default="BDM",
choices=["RT", "RTCF", "BDM"],
help="Mixed method type for the SWE.")
parser.add_argument("--profile",
action="store_true",
help="Start profiler.")
parser.add_argument("--nsteps",
action="store",
default=20,
type=int,
help="Number of steps to profile.")
parser.add_argument("--refinements",
action="store",
default=4,
type=int,
choices=[3, 4, 5, 6, 7, 8],
help="How many refinements to make to the sphere mesh.")
parser.add_argument("--write",
action="store_true",
help="Write output.")
parser.add_argument("--monitor",
action="store_true",
help="Turn on KSP monitors for debugging")
parser.add_argument("--help",
action="store_true",
help="Show help")
args, _ = parser.parse_known_args()
if args.help:
help = parser.format_help()
PETSc.Sys.Print("%s\n" % help)
sys.exit(1)
PETSc.Log.begin()
def run_williamson5(problem_cls, Dt, refinements, method,
model_degree, mesh_degree, nsteps,
hybridization, write=False, cold=False):
# Radius of the Earth (m)
R = 6371220.0
# Max depth height (m)
H = 5960.0
if cold:
PETSc.Sys.Print("""
Running cold initialization for the problem set:\n
method: %s,\n
model degree: %s,\n
hybridization: %s,\n
""" % (method, model_degree, bool(hybridization)))
problem = problem_cls(refinement_level=refinements,
R=R,
H=H,
Dt=Dt,
method=method,
hybridization=hybridization,
model_degree=model_degree,
mesh_degree=mesh_degree,
monitor=args.monitor)
problem.warmup()
return
problem = problem_cls(refinement_level=refinements,
R=R,
H=H,
Dt=Dt,
method=method,
hybridization=hybridization,
model_degree=model_degree,
mesh_degree=mesh_degree,
monitor=args.monitor)
cfl = problem.courant
dx_max = problem.dx_max
PETSc.Sys.Print("""
Dt = %s,\n
Courant number (approximate): %s,\n
Dx (max): %s km,\n
nsteps: %s.
""" % (Dt, cfl, dx_max/1000, nsteps))
comm = problem.comm
day = 24.*60.*60.
if args.profile:
tmax = nsteps*Dt
else:
tmax = 15*day
PETSc.Sys.Print("Running 15 day simulation\n")
# If writing simulation output, write out fields in 5-day intervals
dumpfreq = 5*day / Dt
PETSc.Sys.Print("Warm up with one-step.\n")
with timed_stage("Warm up"):
problem.warmup()
PETSc.Log.Stage("Warm up: Linear solve").push()
prepcsetup = PETSc.Log.Event("PCSetUp").getPerfInfo()
pre_res_eval = PETSc.Log.Event("SNESFunctionEval").getPerfInfo()
pre_jac_eval = PETSc.Log.Event("SNESJacobianEval").getPerfInfo()
pre_res_eval_time = comm.allreduce(pre_res_eval["time"],
op=MPI.SUM) / comm.size
pre_jac_eval_time = comm.allreduce(pre_jac_eval["time"],
op=MPI.SUM) / comm.size
pre_setup_time = comm.allreduce(prepcsetup["time"],
op=MPI.SUM) / comm.size
if problem.hybridization:
prehybridinit = PETSc.Log.Event("HybridInit").getPerfInfo()
prehybridinit_time = comm.allreduce(prehybridinit["time"],
op=MPI.SUM) / comm.size
PETSc.Log.Stage("Warm up: Linear solve").pop()
PETSc.Sys.Print("Warm up done. Profiling run for %d steps.\n" % nsteps)
problem.initialize()
problem.run_simulation(tmax, write=write, dumpfreq=dumpfreq)
PETSc.Sys.Print("Simulation complete.\n")
PETSc.Log.Stage("Linear solve").push()
snes = PETSc.Log.Event("SNESSolve").getPerfInfo()
ksp = PETSc.Log.Event("KSPSolve").getPerfInfo()
pcsetup = PETSc.Log.Event("PCSetUp").getPerfInfo()
pcapply = PETSc.Log.Event("PCApply").getPerfInfo()
jac_eval = PETSc.Log.Event("SNESJacobianEval").getPerfInfo()
residual = PETSc.Log.Event("SNESFunctionEval").getPerfInfo()
snes_time = comm.allreduce(snes["time"], op=MPI.SUM) / comm.size
ksp_time = comm.allreduce(ksp["time"], op=MPI.SUM) / comm.size
pc_setup_time = comm.allreduce(pcsetup["time"], op=MPI.SUM) / comm.size
pc_apply_time = comm.allreduce(pcapply["time"], op=MPI.SUM) / comm.size
jac_eval_time = comm.allreduce(jac_eval["time"], op=MPI.SUM) / comm.size
res_eval_time = comm.allreduce(residual["time"], op=MPI.SUM) / comm.size
ref = problem.refinement_level
num_cells = comm.allreduce(problem.num_cells, op=MPI.SUM)
if problem.hybridization:
results_data = "results/hybrid_%s_data_W5_ref%d_Dt%s_NS%d.csv" % (
problem.method,
ref,
Dt,
nsteps
)
results_timings = "results/hybrid_%s_profile_W5_ref%d_Dt%s_NS%d.csv" % (
problem.method,
ref,
Dt,
nsteps
)
RHS = PETSc.Log.Event("HybridRHS").getPerfInfo()
trace = PETSc.Log.Event("SCSolve").getPerfInfo()
proj = PETSc.Log.Event("HybridProject").getPerfInfo()
full_recon = PETSc.Log.Event("SCBackSub").getPerfInfo()
hybridbreak = PETSc.Log.Event("HybridBreak").getPerfInfo()
hybridupdate = PETSc.Log.Event("HybridUpdate").getPerfInfo()
hybridinit = PETSc.Log.Event("HybridInit").getPerfInfo()
# Time to reconstruct (backsub) and project
full_recon_time = comm.allreduce(full_recon["time"],
op=MPI.SUM) / comm.size
# Project only
projection = comm.allreduce(proj["time"], op=MPI.SUM) / comm.size
# Backsub only = Total Recon time - projection time
recon_time = full_recon_time - projection
transfer = comm.allreduce(hybridbreak["time"], op=MPI.SUM) / comm.size
update_time = comm.allreduce(hybridupdate["time"],
op=MPI.SUM) / comm.size
trace_solve = comm.allreduce(trace["time"], op=MPI.SUM) / comm.size
rhstime = comm.allreduce(RHS["time"], op=MPI.SUM) / comm.size
inittime = comm.allreduce(hybridinit["time"], op=MPI.SUM) / comm.size
other = ksp_time - (trace_solve + transfer
+ projection + recon_time + rhstime)
full_solve = (transfer + trace_solve + rhstime
+ recon_time + projection + update_time)
else:
results_data = "results/gmres_%s_data_W5_ref%d_Dt%s_NS%d.csv" % (
problem.method,
ref,
Dt,
nsteps
)
results_timings = "results/gmres_%s_profile_W5_ref%d_Dt%s_NS%d.csv" % (
problem.method,
ref,
Dt,
nsteps
)
KSPSchur = PETSc.Log.Event("KSPSolve_FS_Schu").getPerfInfo()
KSPF0 = PETSc.Log.Event("KSPSolve_FS_0").getPerfInfo()
KSPLow = PETSc.Log.Event("KSPSolve_FS_Low").getPerfInfo()
schur_time = comm.allreduce(KSPSchur["time"], op=MPI.SUM) / comm.size
f0_time = comm.allreduce(KSPF0["time"], op=MPI.SUM) / comm.size
ksplow_time = comm.allreduce(KSPLow["time"], op=MPI.SUM) / comm.size
other = ksp_time - (schur_time + f0_time + ksplow_time)
PETSc.Log.Stage("Linear solve").pop()
if COMM_WORLD.rank == 0:
if not os.path.exists(os.path.dirname('results/')):
os.makedirs(os.path.dirname('results/'))
data = {"OuterIters": problem.ksp_outer_its,
"InnerIters": problem.ksp_inner_its,
"PicardIters": problem.picard_seq,
"SimTime": problem.sim_time,
"ResidualReductions": problem.reductions}
dofs = problem.DU.dof_dset.layout_vec.getSize()
time_data = {"PETSCLogKSPSolve": ksp_time,
"PETSCLogPCApply": pc_apply_time,
"PETSCLogPCSetup": pc_setup_time,
"PETSCLogPreSetup": pre_setup_time,
"PETSCLogPreSNESJacobianEval": pre_jac_eval_time,
"PETSCLogPreSNESFunctionEval": pre_res_eval_time,
"SNESSolve": snes_time,
"SNESFunctionEval": res_eval_time,
"SNESJacobianEval": jac_eval_time,
"num_processes": problem.comm.size,
"method": problem.method,
"model_degree": problem.model_degree,
"refinement_level": problem.refinement_level,
"total_dofs": dofs,
"num_cells": num_cells,
"Dt": Dt,
"CFL": cfl,
"nsteps": nsteps,
"DxMax": dx_max}
if problem.hybridization:
updates = {"HybridTraceSolve": trace_solve,
"HybridRHS": rhstime,
"HybridBreak": transfer,
"HybridReconstruction": recon_time,
"HybridProjection": projection,
"HybridFullRecovery": full_recon_time,
"HybridUpdate": update_time,
"HybridInit": inittime,
"PreHybridInit": prehybridinit_time,
"HybridFullSolveTime": full_solve,
"HybridKSPOther": other}
else:
updates = {"KSPSchur": schur_time,
"KSPF0": f0_time,
"KSPFSLow": ksplow_time,
"KSPother": other}
time_data.update(updates)
df_data = pd.DataFrame(data)
df_data.to_csv(results_data, index=False,
mode="w", header=True)
df_time = pd.DataFrame(time_data, index=[0])
df_time.to_csv(results_timings, index=False,
mode="w", header=True)
W5Problem = module.W5Problem
method = args.method
model_degree = args.model_degree
mesh_degree = args.mesh_degree
refinements = args.refinements
hybridization = args.hybridization
Dt = args.dt
if args.profile:
run_williamson5(problem_cls=W5Problem,
Dt=Dt,
refinements=refinements,
method=method,
model_degree=model_degree,
mesh_degree=mesh_degree,
nsteps=args.nsteps,
hybridization=hybridization,
write=False,
# Do a cold run to generate code
cold=True)
# Now start the profiler
run_williamson5(problem_cls=W5Problem,
Dt=Dt,
refinements=refinements,
method=method,
model_degree=model_degree,
mesh_degree=mesh_degree,
nsteps=args.nsteps,
hybridization=hybridization,
write=False,
cold=False)
else:
run_williamson5(problem_cls=W5Problem,
Dt=Dt,
refinements=refinements,
method=method,
model_degree=model_degree,
mesh_degree=mesh_degree,
nsteps=args.nsteps,
hybridization=hybridization,
write=args.write,
cold=False)
| mit |
jhamman/xarray | xarray/tests/test_accessor_str.py | 2 | 24924 | # Tests for the `str` accessor are derived from the original
# pandas string accessor tests.
# For reference, here is a copy of the pandas copyright notice:
# (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team
# All rights reserved.
# Copyright (c) 2008-2011 AQR Capital Management, LLC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the copyright holder nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import numpy as np
import pytest
import xarray as xr
from . import assert_equal, requires_dask
@pytest.fixture(params=[np.str_, np.bytes_])
def dtype(request):
return request.param
@requires_dask
def test_dask():
import dask.array as da
arr = da.from_array(["a", "b", "c"], chunks=-1)
xarr = xr.DataArray(arr)
result = xarr.str.len().compute()
expected = xr.DataArray([1, 1, 1])
assert_equal(result, expected)
def test_count(dtype):
values = xr.DataArray(["foo", "foofoo", "foooofooofommmfoo"]).astype(dtype)
result = values.str.count("f[o]+")
expected = xr.DataArray([1, 2, 4])
assert_equal(result, expected)
def test_contains(dtype):
values = xr.DataArray(["Foo", "xYz", "fOOomMm__fOo", "MMM_"]).astype(dtype)
# case insensitive using regex
result = values.str.contains("FOO|mmm", case=False)
expected = xr.DataArray([True, False, True, True])
assert_equal(result, expected)
# case insensitive without regex
result = values.str.contains("foo", regex=False, case=False)
expected = xr.DataArray([True, False, True, False])
assert_equal(result, expected)
def test_starts_ends_with(dtype):
values = xr.DataArray(["om", "foo_nom", "nom", "bar_foo", "foo"]).astype(dtype)
result = values.str.startswith("foo")
expected = xr.DataArray([False, True, False, False, True])
assert_equal(result, expected)
result = values.str.endswith("foo")
expected = xr.DataArray([False, False, False, True, True])
assert_equal(result, expected)
def test_case(dtype):
da = xr.DataArray(["SOme word"]).astype(dtype)
capitalized = xr.DataArray(["Some word"]).astype(dtype)
lowered = xr.DataArray(["some word"]).astype(dtype)
swapped = xr.DataArray(["soME WORD"]).astype(dtype)
titled = xr.DataArray(["Some Word"]).astype(dtype)
uppered = xr.DataArray(["SOME WORD"]).astype(dtype)
assert_equal(da.str.capitalize(), capitalized)
assert_equal(da.str.lower(), lowered)
assert_equal(da.str.swapcase(), swapped)
assert_equal(da.str.title(), titled)
assert_equal(da.str.upper(), uppered)
def test_replace(dtype):
values = xr.DataArray(["fooBAD__barBAD"]).astype(dtype)
result = values.str.replace("BAD[_]*", "")
expected = xr.DataArray(["foobar"]).astype(dtype)
assert_equal(result, expected)
result = values.str.replace("BAD[_]*", "", n=1)
expected = xr.DataArray(["foobarBAD"]).astype(dtype)
assert_equal(result, expected)
s = xr.DataArray(["A", "B", "C", "Aaba", "Baca", "", "CABA", "dog", "cat"]).astype(
dtype
)
result = s.str.replace("A", "YYY")
expected = xr.DataArray(
["YYY", "B", "C", "YYYaba", "Baca", "", "CYYYBYYY", "dog", "cat"]
).astype(dtype)
assert_equal(result, expected)
result = s.str.replace("A", "YYY", case=False)
expected = xr.DataArray(
["YYY", "B", "C", "YYYYYYbYYY", "BYYYcYYY", "", "CYYYBYYY", "dog", "cYYYt"]
).astype(dtype)
assert_equal(result, expected)
result = s.str.replace("^.a|dog", "XX-XX ", case=False)
expected = xr.DataArray(
["A", "B", "C", "XX-XX ba", "XX-XX ca", "", "XX-XX BA", "XX-XX ", "XX-XX t"]
).astype(dtype)
assert_equal(result, expected)
def test_replace_callable():
values = xr.DataArray(["fooBAD__barBAD"])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace("[a-z][A-Z]{2}", repl, n=2)
exp = xr.DataArray(["foObaD__baRbaD"])
assert_equal(result, exp)
# test regex named groups
values = xr.DataArray(["Foo Bar Baz"])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group("middle").swapcase()
result = values.str.replace(pat, repl)
exp = xr.DataArray(["bAR"])
assert_equal(result, exp)
def test_replace_unicode():
# flags + unicode
values = xr.DataArray([b"abcd,\xc3\xa0".decode("utf-8")])
expected = xr.DataArray([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
assert_equal(result, expected)
def test_replace_compiled_regex(dtype):
values = xr.DataArray(["fooBAD__barBAD"]).astype(dtype)
# test with compiled regex
pat = re.compile(dtype("BAD[_]*"))
result = values.str.replace(pat, "")
expected = xr.DataArray(["foobar"]).astype(dtype)
assert_equal(result, expected)
result = values.str.replace(pat, "", n=1)
expected = xr.DataArray(["foobarBAD"]).astype(dtype)
assert_equal(result, expected)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = xr.DataArray(["fooBAD__barBAD__bad"]).astype(dtype)
pat = re.compile(dtype("BAD[_]*"))
with pytest.raises(ValueError, match="case and flags cannot be"):
result = values.str.replace(pat, "", flags=re.IGNORECASE)
with pytest.raises(ValueError, match="case and flags cannot be"):
result = values.str.replace(pat, "", case=False)
with pytest.raises(ValueError, match="case and flags cannot be"):
result = values.str.replace(pat, "", case=True)
# test with callable
values = xr.DataArray(["fooBAD__barBAD"]).astype(dtype)
repl = lambda m: m.group(0).swapcase()
pat = re.compile(dtype("[a-z][A-Z]{2}"))
result = values.str.replace(pat, repl, n=2)
expected = xr.DataArray(["foObaD__baRbaD"]).astype(dtype)
assert_equal(result, expected)
def test_replace_literal(dtype):
# GH16808 literal replace (regex=False vs regex=True)
values = xr.DataArray(["f.o", "foo"]).astype(dtype)
expected = xr.DataArray(["bao", "bao"]).astype(dtype)
result = values.str.replace("f.", "ba")
assert_equal(result, expected)
expected = xr.DataArray(["bao", "foo"]).astype(dtype)
result = values.str.replace("f.", "ba", regex=False)
assert_equal(result, expected)
# Cannot do a literal replace if given a callable repl or compiled
# pattern
callable_repl = lambda m: m.group(0).swapcase()
compiled_pat = re.compile("[a-z][A-Z]{2}")
msg = "Cannot use a callable replacement when regex=False"
with pytest.raises(ValueError, match=msg):
values.str.replace("abc", callable_repl, regex=False)
msg = "Cannot use a compiled regex as replacement pattern with regex=False"
with pytest.raises(ValueError, match=msg):
values.str.replace(compiled_pat, "", regex=False)
def test_repeat(dtype):
values = xr.DataArray(["a", "b", "c", "d"]).astype(dtype)
result = values.str.repeat(3)
expected = xr.DataArray(["aaa", "bbb", "ccc", "ddd"]).astype(dtype)
assert_equal(result, expected)
def test_match(dtype):
# New match behavior introduced in 0.13
values = xr.DataArray(["fooBAD__barBAD", "foo"]).astype(dtype)
result = values.str.match(".*(BAD[_]+).*(BAD)")
expected = xr.DataArray([True, False])
assert_equal(result, expected)
values = xr.DataArray(["fooBAD__barBAD", "foo"]).astype(dtype)
result = values.str.match(".*BAD[_]+.*BAD")
expected = xr.DataArray([True, False])
assert_equal(result, expected)
def test_empty_str_methods():
empty = xr.DataArray(np.empty(shape=(0,), dtype="U"))
empty_str = empty
empty_int = xr.DataArray(np.empty(shape=(0,), dtype=int))
empty_bool = xr.DataArray(np.empty(shape=(0,), dtype=bool))
empty_bytes = xr.DataArray(np.empty(shape=(0,), dtype="S"))
assert_equal(empty_str, empty.str.title())
assert_equal(empty_int, empty.str.count("a"))
assert_equal(empty_bool, empty.str.contains("a"))
assert_equal(empty_bool, empty.str.startswith("a"))
assert_equal(empty_bool, empty.str.endswith("a"))
assert_equal(empty_str, empty.str.lower())
assert_equal(empty_str, empty.str.upper())
assert_equal(empty_str, empty.str.replace("a", "b"))
assert_equal(empty_str, empty.str.repeat(3))
assert_equal(empty_bool, empty.str.match("^a"))
assert_equal(empty_int, empty.str.len())
assert_equal(empty_int, empty.str.find("a"))
assert_equal(empty_int, empty.str.rfind("a"))
assert_equal(empty_str, empty.str.pad(42))
assert_equal(empty_str, empty.str.center(42))
assert_equal(empty_str, empty.str.slice(stop=1))
assert_equal(empty_str, empty.str.slice(step=1))
assert_equal(empty_str, empty.str.strip())
assert_equal(empty_str, empty.str.lstrip())
assert_equal(empty_str, empty.str.rstrip())
assert_equal(empty_str, empty.str.wrap(42))
assert_equal(empty_str, empty.str.get(0))
assert_equal(empty_str, empty_bytes.str.decode("ascii"))
assert_equal(empty_bytes, empty.str.encode("ascii"))
assert_equal(empty_str, empty.str.isalnum())
assert_equal(empty_str, empty.str.isalpha())
assert_equal(empty_str, empty.str.isdigit())
assert_equal(empty_str, empty.str.isspace())
assert_equal(empty_str, empty.str.islower())
assert_equal(empty_str, empty.str.isupper())
assert_equal(empty_str, empty.str.istitle())
assert_equal(empty_str, empty.str.isnumeric())
assert_equal(empty_str, empty.str.isdecimal())
assert_equal(empty_str, empty.str.capitalize())
assert_equal(empty_str, empty.str.swapcase())
table = str.maketrans("a", "b")
assert_equal(empty_str, empty.str.translate(table))
def test_ismethods(dtype):
values = ["A", "b", "Xy", "4", "3A", "", "TT", "55", "-", " "]
str_s = xr.DataArray(values).astype(dtype)
alnum_e = [True, True, True, True, True, False, True, True, False, False]
alpha_e = [True, True, True, False, False, False, True, False, False, False]
digit_e = [False, False, False, True, False, False, False, True, False, False]
space_e = [False, False, False, False, False, False, False, False, False, True]
lower_e = [False, True, False, False, False, False, False, False, False, False]
upper_e = [True, False, False, False, True, False, True, False, False, False]
title_e = [True, False, True, False, True, False, False, False, False, False]
assert_equal(str_s.str.isalnum(), xr.DataArray(alnum_e))
assert_equal(str_s.str.isalpha(), xr.DataArray(alpha_e))
assert_equal(str_s.str.isdigit(), xr.DataArray(digit_e))
assert_equal(str_s.str.isspace(), xr.DataArray(space_e))
assert_equal(str_s.str.islower(), xr.DataArray(lower_e))
assert_equal(str_s.str.isupper(), xr.DataArray(upper_e))
assert_equal(str_s.str.istitle(), xr.DataArray(title_e))
def test_isnumeric():
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: ３ FULLWIDTH DIGIT THREE (em-width 3)
values = ["A", "3", "¼", "★", "፸", "３", "four"]
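# (isdecimal() is stricter than isnumeric(): the vulgar fraction and the
# Ethiopic numeral count as numeric but not decimal, as the numeric_e and
# decimal_e arrays below reflect.)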
s = xr.DataArray(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
assert_equal(s.str.isnumeric(), xr.DataArray(numeric_e))
assert_equal(s.str.isdecimal(), xr.DataArray(decimal_e))
def test_len(dtype):
values = ["foo", "fooo", "fooooo", "fooooooo"]
result = xr.DataArray(values).astype(dtype).str.len()
expected = xr.DataArray([len(x) for x in values])
assert_equal(result, expected)
def test_find(dtype):
values = xr.DataArray(["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF", "XXX"])
values = values.astype(dtype)
result = values.str.find("EF")
assert_equal(result, xr.DataArray([4, 3, 1, 0, -1]))
expected = xr.DataArray([v.find(dtype("EF")) for v in values.values])
assert_equal(result, expected)
result = values.str.rfind("EF")
assert_equal(result, xr.DataArray([4, 5, 7, 4, -1]))
expected = xr.DataArray([v.rfind(dtype("EF")) for v in values.values])
assert_equal(result, expected)
result = values.str.find("EF", 3)
assert_equal(result, xr.DataArray([4, 3, 7, 4, -1]))
expected = xr.DataArray([v.find(dtype("EF"), 3) for v in values.values])
assert_equal(result, expected)
result = values.str.rfind("EF", 3)
assert_equal(result, xr.DataArray([4, 5, 7, 4, -1]))
expected = xr.DataArray([v.rfind(dtype("EF"), 3) for v in values.values])
assert_equal(result, expected)
result = values.str.find("EF", 3, 6)
assert_equal(result, xr.DataArray([4, 3, -1, 4, -1]))
expected = xr.DataArray([v.find(dtype("EF"), 3, 6) for v in values.values])
assert_equal(result, expected)
result = values.str.rfind("EF", 3, 6)
assert_equal(result, xr.DataArray([4, 3, -1, 4, -1]))
xp = xr.DataArray([v.rfind(dtype("EF"), 3, 6) for v in values.values])
assert_equal(result, xp)
def test_index(dtype):
s = xr.DataArray(["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"]).astype(dtype)
result = s.str.index("EF")
assert_equal(result, xr.DataArray([4, 3, 1, 0]))
result = s.str.rindex("EF")
assert_equal(result, xr.DataArray([4, 5, 7, 4]))
result = s.str.index("EF", 3)
assert_equal(result, xr.DataArray([4, 3, 7, 4]))
result = s.str.rindex("EF", 3)
assert_equal(result, xr.DataArray([4, 5, 7, 4]))
result = s.str.index("E", 4, 8)
assert_equal(result, xr.DataArray([4, 5, 7, 4]))
result = s.str.rindex("E", 0, 5)
assert_equal(result, xr.DataArray([4, 3, 1, 4]))
with pytest.raises(ValueError):
result = s.str.index("DE")
def test_pad(dtype):
values = xr.DataArray(["a", "b", "c", "eeeee"]).astype(dtype)
result = values.str.pad(5, side="left")
expected = xr.DataArray([" a", " b", " c", "eeeee"]).astype(dtype)
assert_equal(result, expected)
result = values.str.pad(5, side="right")
expected = xr.DataArray(["a ", "b ", "c ", "eeeee"]).astype(dtype)
assert_equal(result, expected)
result = values.str.pad(5, side="both")
expected = xr.DataArray([" a ", " b ", " c ", "eeeee"]).astype(dtype)
assert_equal(result, expected)
def test_pad_fillchar(dtype):
values = xr.DataArray(["a", "b", "c", "eeeee"]).astype(dtype)
result = values.str.pad(5, side="left", fillchar="X")
expected = xr.DataArray(["XXXXa", "XXXXb", "XXXXc", "eeeee"]).astype(dtype)
assert_equal(result, expected)
result = values.str.pad(5, side="right", fillchar="X")
expected = xr.DataArray(["aXXXX", "bXXXX", "cXXXX", "eeeee"]).astype(dtype)
assert_equal(result, expected)
result = values.str.pad(5, side="both", fillchar="X")
expected = xr.DataArray(["XXaXX", "XXbXX", "XXcXX", "eeeee"]).astype(dtype)
assert_equal(result, expected)
msg = "fillchar must be a character, not str"
with pytest.raises(TypeError, match=msg):
result = values.str.pad(5, fillchar="XY")
def test_translate():
values = xr.DataArray(["abcdefg", "abcc", "cdddfg", "cdefggg"])
table = str.maketrans("abc", "cde")
result = values.str.translate(table)
expected = xr.DataArray(["cdedefg", "cdee", "edddfg", "edefggg"])
assert_equal(result, expected)
def test_center_ljust_rjust(dtype):
values = xr.DataArray(["a", "b", "c", "eeeee"]).astype(dtype)
result = values.str.center(5)
expected = xr.DataArray([" a ", " b ", " c ", "eeeee"]).astype(dtype)
assert_equal(result, expected)
result = values.str.ljust(5)
expected = xr.DataArray(["a ", "b ", "c ", "eeeee"]).astype(dtype)
assert_equal(result, expected)
result = values.str.rjust(5)
expected = xr.DataArray([" a", " b", " c", "eeeee"]).astype(dtype)
assert_equal(result, expected)
def test_center_ljust_rjust_fillchar(dtype):
values = xr.DataArray(["a", "bb", "cccc", "ddddd", "eeeeee"]).astype(dtype)
result = values.str.center(5, fillchar="X")
expected = xr.DataArray(["XXaXX", "XXbbX", "Xcccc", "ddddd", "eeeeee"])
assert_equal(result, expected.astype(dtype))
result = values.str.ljust(5, fillchar="X")
expected = xr.DataArray(["aXXXX", "bbXXX", "ccccX", "ddddd", "eeeeee"])
assert_equal(result, expected.astype(dtype))
result = values.str.rjust(5, fillchar="X")
expected = xr.DataArray(["XXXXa", "XXXbb", "Xcccc", "ddddd", "eeeeee"])
assert_equal(result, expected.astype(dtype))
# If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
template = "fillchar must be a character, not {dtype}"
with pytest.raises(TypeError, match=template.format(dtype="str")):
values.str.center(5, fillchar="XY")
with pytest.raises(TypeError, match=template.format(dtype="str")):
values.str.ljust(5, fillchar="XY")
with pytest.raises(TypeError, match=template.format(dtype="str")):
values.str.rjust(5, fillchar="XY")
def test_zfill(dtype):
values = xr.DataArray(["1", "22", "aaa", "333", "45678"]).astype(dtype)
result = values.str.zfill(5)
expected = xr.DataArray(["00001", "00022", "00aaa", "00333", "45678"])
assert_equal(result, expected.astype(dtype))
result = values.str.zfill(3)
expected = xr.DataArray(["001", "022", "aaa", "333", "45678"])
assert_equal(result, expected.astype(dtype))
def test_slice(dtype):
arr = xr.DataArray(["aafootwo", "aabartwo", "aabazqux"]).astype(dtype)
result = arr.str.slice(2, 5)
exp = xr.DataArray(["foo", "bar", "baz"]).astype(dtype)
assert_equal(result, exp)
for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2), (3, 0, -1)]:
try:
result = arr.str[start:stop:step]
expected = xr.DataArray([s[start:stop:step] for s in arr.values])
assert_equal(result, expected.astype(dtype))
except IndexError:
print(f"failed on {start}:{stop}:{step}")
raise
def test_slice_replace(dtype):
da = lambda x: xr.DataArray(x).astype(dtype)
values = da(["short", "a bit longer", "evenlongerthanthat", ""])
expected = da(["shrt", "a it longer", "evnlongerthanthat", ""])
result = values.str.slice_replace(2, 3)
assert_equal(result, expected)
expected = da(["shzrt", "a zit longer", "evznlongerthanthat", "z"])
result = values.str.slice_replace(2, 3, "z")
assert_equal(result, expected)
expected = da(["shzort", "a zbit longer", "evzenlongerthanthat", "z"])
result = values.str.slice_replace(2, 2, "z")
assert_equal(result, expected)
expected = da(["shzort", "a zbit longer", "evzenlongerthanthat", "z"])
result = values.str.slice_replace(2, 1, "z")
assert_equal(result, expected)
expected = da(["shorz", "a bit longez", "evenlongerthanthaz", "z"])
result = values.str.slice_replace(-1, None, "z")
assert_equal(result, expected)
expected = da(["zrt", "zer", "zat", "z"])
result = values.str.slice_replace(None, -2, "z")
assert_equal(result, expected)
expected = da(["shortz", "a bit znger", "evenlozerthanthat", "z"])
result = values.str.slice_replace(6, 8, "z")
assert_equal(result, expected)
expected = da(["zrt", "a zit longer", "evenlongzerthanthat", "z"])
result = values.str.slice_replace(-10, 3, "z")
assert_equal(result, expected)
def test_strip_lstrip_rstrip(dtype):
values = xr.DataArray([" aa ", " bb \n", "cc "]).astype(dtype)
result = values.str.strip()
expected = xr.DataArray(["aa", "bb", "cc"]).astype(dtype)
assert_equal(result, expected)
result = values.str.lstrip()
expected = xr.DataArray(["aa ", "bb \n", "cc "]).astype(dtype)
assert_equal(result, expected)
result = values.str.rstrip()
expected = xr.DataArray([" aa", " bb", "cc"]).astype(dtype)
assert_equal(result, expected)
def test_strip_lstrip_rstrip_args(dtype):
values = xr.DataArray(["xxABCxx", "xx BNSD", "LDFJH xx"]).astype(dtype)
rs = values.str.strip("x")
xp = xr.DataArray(["ABC", " BNSD", "LDFJH "]).astype(dtype)
assert_equal(rs, xp)
rs = values.str.lstrip("x")
xp = xr.DataArray(["ABCxx", " BNSD", "LDFJH xx"]).astype(dtype)
assert_equal(rs, xp)
rs = values.str.rstrip("x")
xp = xr.DataArray(["xxABC", "xx BNSD", "LDFJH "]).astype(dtype)
assert_equal(rs, xp)
def test_wrap():
# test values are: two words less than width, two words equal to width,
# two words greater than width, one word less than width, one word
# equal to width, one word greater than width, multiple tokens with
# trailing whitespace equal to width
values = xr.DataArray(
[
"hello world",
"hello world!",
"hello world!!",
"abcdefabcde",
"abcdefabcdef",
"abcdefabcdefa",
"ab ab ab ab ",
"ab ab ab ab a",
"\t",
]
)
# expected values
xp = xr.DataArray(
[
"hello world",
"hello world!",
"hello\nworld!!",
"abcdefabcde",
"abcdefabcdef",
"abcdefabcdef\na",
"ab ab ab ab",
"ab ab ab ab\na",
"",
]
)
rs = values.str.wrap(12, break_long_words=True)
assert_equal(rs, xp)
# test with pre and post whitespace (non-unicode), NaN, and non-ascii
# Unicode
values = xr.DataArray([" pre ", "\xac\u20ac\U00008000 abadcafe"])
xp = xr.DataArray([" pre", "\xac\u20ac\U00008000 ab\nadcafe"])
rs = values.str.wrap(6)
assert_equal(rs, xp)
def test_get(dtype):
values = xr.DataArray(["a_b_c", "c_d_e", "f_g_h"]).astype(dtype)
result = values.str[2]
expected = xr.DataArray(["b", "d", "g"]).astype(dtype)
assert_equal(result, expected)
# bounds testing
values = xr.DataArray(["1_2_3_4_5", "6_7_8_9_10", "11_12"]).astype(dtype)
# positive index
result = values.str[5]
expected = xr.DataArray(["_", "_", ""]).astype(dtype)
assert_equal(result, expected)
# negative index
result = values.str[-6]
expected = xr.DataArray(["_", "8", ""]).astype(dtype)
assert_equal(result, expected)
def test_encode_decode():
data = xr.DataArray(["a", "b", "a\xe4"])
encoded = data.str.encode("utf-8")
decoded = encoded.str.decode("utf-8")
assert_equal(data, decoded)
def test_encode_decode_errors():
encodeBase = xr.DataArray(["a", "b", "a\x9d"])
msg = (
r"'charmap' codec can't encode character '\\x9d' in position 1:"
" character maps to <undefined>"
)
with pytest.raises(UnicodeEncodeError, match=msg):
encodeBase.str.encode("cp1252")
f = lambda x: x.encode("cp1252", "ignore")
result = encodeBase.str.encode("cp1252", "ignore")
expected = xr.DataArray([f(x) for x in encodeBase.values.tolist()])
assert_equal(result, expected)
decodeBase = xr.DataArray([b"a", b"b", b"a\x9d"])
msg = (
"'charmap' codec can't decode byte 0x9d in position 1:"
" character maps to <undefined>"
)
with pytest.raises(UnicodeDecodeError, match=msg):
decodeBase.str.decode("cp1252")
f = lambda x: x.decode("cp1252", "ignore")
result = decodeBase.str.decode("cp1252", "ignore")
expected = xr.DataArray([f(x) for x in decodeBase.values.tolist()])
assert_equal(result, expected)
| apache-2.0 |
JudoWill/ResearchNotebooks | SpeedingTreeStats.py | 1 | 6383 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import sys
sys.path.append('/home/will/PySeqUtils/')
# <codecell>
import TreeingTools
import GeneralSeqTools
import dendropy
# <codecell>
with open('/home/will/SubCData/mafft_ep.fasta') as handle:
seqs = list(GeneralSeqTools.fasta_reader(handle))
# <codecell>
import os, os.path
import csv
from itertools import product
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from operator import methodcaller
from itertools import groupby
from Bio.Seq import Seq
from Bio import Motif
from Bio.Alphabet import IUPAC
from StringIO import StringIO
from subprocess import check_output, check_call
from tempfile import NamedTemporaryFile as NTF
import shlex
# <codecell>
# Exploratory cell: the IPython "?" help syntax is not valid plain Python, and
# `mot` is only bound inside yield_motifs() below, so this is kept commented out.
# tmp = Motif.Thresholds.ScoreDistribution(mot, precision = 50)
# tmp.threshold_fnr?
# <codecell>
def yield_motifs():
with open('/home/will/LTRtfAnalysis/Jaspar_PWMs.txt') as handle:
for key, lines in groupby(handle, methodcaller('startswith', '>')):
if key:
name = lines.next().strip().split()[-1].lower()
else:
tmp = ''.join(lines)
mot = Motif.read(StringIO(tmp), 'jaspar-pfm')
yield name, mot
yield name+'-R', mot.reverse_complement()
tmp = u"""A 0 0 6 1 0 0 0 4 2 2 0 0 3
C 1 1 1 0 5 6 4 1 0 0 0 3 5 5 4 0
G 0 6 0 1 1 0 0 0 0 7 1 1 0 0 1 0
T 6 0 0 0 1 1 3 5 7 0 0 0 0 2 2 4"""
mot = Motif.read(StringIO(tmp), 'jaspar-pfm')
yield 'coup2', mot
yield 'coup2-R', mot.reverse_complement()
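# Each motif is yielded twice: once as parsed and once as its reverse
# complement under the name "<name>-R", so scanning a sequence against both
# entries covers both strands without reverse-complementing the sequence itself.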
pwm_dict = {}
for num, (name, mot) in enumerate(yield_motifs()):
if num % 100 == 0:
print num
#low_thresh = Motif.Thresholds.ScoreDistribution(mot, precision = 50).threshold_fpr(0.01)
#high_thresh = Motif.Thresholds.ScoreDistribution(mot, precision = 50).threshold_fnr(0.3)
pwm_dict[name] = mot
# <codecell>
wanted_mots = ['ap1', 'ap1-R',
'cebpa', 'cebpa-R',
'creb1', 'creb1-R',
'coup2', 'coup2-R',
'ets1','ets1-R',
#'fev', 'fev-R',
'foxc1', 'foxc1-R',
#'gata2', 'gata2-R',
#'gata3', 'gata3-R',
#'hnf4a', 'hnf4a-R',
#'hoxa5', 'hoxa5-R',
'nf-kappab','nf-kappab-R',
'nfatc2', 'nfatc2-R',
'nr2f1','nr2f1-R',
#'tfap2a', 'tfap2a-R',
#'znf354c','znf354c-R',
'sp1', 'sp1-R']
# <codecell>
with open('/home/will/SubCData/C_ltr.fasta') as handle:
raw_seqs = list(GeneralSeqTools.fasta_reader(handle))
# <codecell>
from operator import itemgetter
def run_mafft(inseqs):
orig_order = [name for name, _ in inseqs]
with NTF(suffix = '.fasta') as handle:
GeneralSeqTools.fasta_writer(handle, inseqs)
handle.flush()
os.fsync(handle)
cmd = 'mafft --quiet --op 10 --ep 0.123 %s' % handle.name
out = check_output(shlex.split(cmd))
out_dict = dict(GeneralSeqTools.fasta_reader(StringIO(out)))
return [(name, out_dict[name]) for name in orig_order]
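# run_mafft above writes the records to a NamedTemporaryFile and calls
# flush()/os.fsync() so the data is on disk before mafft (invoked on the file
# path) starts reading; the aligned FASTA printed on stdout is parsed into a
# dict and re-ordered to match the original input order.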
def align_blocks(inseqs, start = 0, winsize = 50):
if start != 0:
slicer = itemgetter(slice(0, start))
yield [(name, slicer(seq)) for name, seq in inseqs]
for num in range(start, len(inseqs[0][1]), winsize):
slicer = itemgetter(slice(num, num+winsize))
yield [(name, slicer(seq)) for name, seq in inseqs]
slicer = itemgetter(slice(num, len(inseqs[0][1])))
yield [(name, slicer(seq)) for name, seq in inseqs]
def join_blocks(blocks):
final_seqs = ['']*len(blocks[0])
for tup in blocks:
seqs = [s for _, s in tup]
for lets in zip(*seqs):
if any(l != '-' for l in lets):
for pos, l in enumerate(lets):
final_seqs[pos] += l
else:
print 'dropped column!'
names = [n for n, _ in tup]
return [(n, s) for n, s in zip(names, final_seqs)]
def refine_alignment(inseqs):
winsizes = [100, 75, 50]
starts = [0, 25, 50, 75]
for win, start in product(winsizes, starts):
blocks = align_blocks(inseqs, winsize=win, start = start)
ablocks = []
print win, start
for num, block in enumerate(blocks):
ablocks.append(run_mafft(block))
inseqs = join_blocks(ablocks)
return inseqs
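# refine_alignment iteratively re-aligns the sequences in windows: for each
# (window size, start offset) pair it slices the current alignment into blocks,
# runs mafft on every block, and stitches them back together with join_blocks
# (which also drops columns that have become all gaps). Varying the offsets
# shifts the block boundaries so positions near an edge in one pass fall in the
# middle of a block in another pass.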
# <codecell>
aligned_seqs = run_mafft(raw_seqs)
# <codecell>
refined = refine_alignment(aligned_seqs)
# <codecell>
with open('/home/will/SubCData/refined.fasta', 'w') as handle:
GeneralSeqTools.fasta_writer(handle, refined)
# <codecell>
refined = join_blocks(aligned_blocks)
# <codecell>
aligned_seqs[0]
# <codecell>
refined[0]
# <codecell>
from itertools import chain
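# score_tf_align below scans the ungapped sequence with each motif's PWM and
# maps the per-position scores back onto the gapped alignment coordinates,
# writing -inf at gap columns; the chain(..., [np.nan] * len(mot)) padding
# covers the tail positions where a full motif window no longer fits, and the
# element-wise np.maximum keeps the best score across the supplied motifs.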
def score_tf_align(mots, seq):
nseq = Seq(seq.replace('-', ''),
alphabet=IUPAC.unambiguous_dna)
all_scores = -np.inf*np.ones((len(seq), 1)).flatten()
for mot in mots:
scores = mot.scanPWM(nseq)
score_iter = chain(iter(scores.flatten()), [np.nan]*len(mot))
oscore = []
for num, l in enumerate(seq):
if l != '-':
oscore.append(score_iter.next())
else:
oscore.append(-np.inf)
all_scores = np.maximum(all_scores.flatten(), np.array(oscore).flatten())
return all_scores
# <codecell>
from itertools import islice
fig, axs = plt.subplots(100, 1, sharex=True, figsize=(10,30))
for ax, (name, seq) in zip(axs.flatten(), seqs):
out_scores = score_tf_align([pwm_dict[mot] for mot in wanted_mots], seq)
ax.plot(out_scores)
ax.set_xticklabels([])
ax.set_yticklabels([])
# <codecell>
plt.plot(out_scores)
# <codecell>
fig, axs = plt.subplots(len(wanted_mots), 1, figsize = (10, 20), sharex=True)
for mot_name , ax in zip(wanted_mots, axs.flatten()):
score_mat = []
for name, seq in seqs:
score_mat.append(score_tf_align(pwm_dict[mot_name], seq))
score_np = np.array(score_mat)
ax.plot(np.mean(score_np>0, axis = 0))
ax.set_title(mot_name)
fig.tight_layout()
# <codecell>
plt.plot()
# <codecell>
| mit |
kyleam/seaborn | seaborn/timeseries.py | 13 | 15218 | """Timeseries plotting functions."""
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats, interpolate
import matplotlib as mpl
import matplotlib.pyplot as plt
from .external.six import string_types
from . import utils
from . import algorithms as algo
from .palettes import color_palette
def tsplot(data, time=None, unit=None, condition=None, value=None,
err_style="ci_band", ci=68, interpolate=True, color=None,
estimator=np.mean, n_boot=5000, err_palette=None, err_kws=None,
legend=True, ax=None, **kwargs):
"""Plot one or more timeseries with flexible representation of uncertainty.
This function is intended to be used with data where observations are
nested within sampling units that were measured at multiple timepoints.
It can take data specified either as a long-form (tidy) DataFrame or as an
ndarray with dimensions (unit, time). The interpretation of some of the
other parameters changes depending on the type of object passed as data.
Parameters
----------
data : DataFrame or ndarray
Data for the plot. Should either be a "long form" dataframe or an
array with dimensions (unit, time, condition). In both cases, the
condition field/dimension is optional. The type of this argument
determines the interpretation of the next few parameters. When
using a DataFrame, the index has to be sequential.
time : string or series-like
Either the name of the field corresponding to time in the data
DataFrame or x values for a plot when data is an array. If a Series,
the name will be used to label the x axis.
unit : string
Field in the data DataFrame identifying the sampling unit (e.g.
subject, neuron, etc.). The error representation will collapse over
units at each time/condition observation. This has no role when data
is an array.
value : string
Either the name of the field corresponding to the data values in
the data DataFrame (i.e. the y coordinate) or a string that forms
the y axis label when data is an array.
condition : string or Series-like
Either the name of the field identifying the condition an observation
falls under in the data DataFrame, or a sequence of names with a length
equal to the size of the third dimension of data. There will be a
separate trace plotted for each condition. If condition is a Series
with a name attribute, the name will form the title for the plot
legend (unless legend is set to False).
err_style : string or list of strings or None
Names of ways to plot uncertainty across units from set of
{ci_band, ci_bars, boot_traces, boot_kde, unit_traces, unit_points}.
Can use one or more than one method.
ci : float or list of floats in [0, 100]
Confidence interval size(s). If a list, it will stack the error
plots for each confidence interval. Only relevant for error styles
with "ci" in the name.
interpolate : boolean
Whether to do a linear interpolation between each timepoint when
plotting. The value of this parameter also determines the marker
used for the main plot traces, unless marker is specified as a keyword
argument.
color : seaborn palette or matplotlib color name or dictionary
Palette or color for the main plots and error representation (unless
plotting by unit, which can be separately controlled with err_palette).
If a dictionary, should map condition name to color spec.
estimator : callable
Function used to determine the central tendency and passed to the
bootstrap routine; it must take an ``axis`` argument.
n_boot : int
Number of bootstrap iterations.
err_palette : seaborn palette
Palette name or list of colors used when plotting data for each unit.
err_kws : dict, optional
Keyword argument dictionary passed through to matplotlib function
generating the error plot.
legend : bool, optional
If ``True`` and there is a ``condition`` variable, add a legend to
the plot.
ax : axis object, optional
Plot in given axis; if None creates a new figure
kwargs :
Other keyword arguments are passed to main plot() call
Returns
-------
ax : matplotlib axis
axis with plot data
Examples
--------
Plot a trace with translucent confidence bands:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(22)
>>> import seaborn as sns; sns.set(color_codes=True)
>>> x = np.linspace(0, 15, 31)
>>> data = np.sin(x) + np.random.rand(10, 31) + np.random.randn(10, 1)
>>> ax = sns.tsplot(data=data)
Plot a long-form dataframe with several conditions:
.. plot::
:context: close-figs
>>> gammas = sns.load_dataset("gammas")
>>> ax = sns.tsplot(time="timepoint", value="BOLD signal",
... unit="subject", condition="ROI",
... data=gammas)
Use error bars at the positions of the observations:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="ci_bars", color="g")
Don't interpolate between the observations:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> ax = sns.tsplot(data=data, err_style="ci_bars", interpolate=False)
Show multiple confidence bands:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, ci=[68, 95], color="m")
Use a different estimator:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, estimator=np.median)
Show each bootstrap resample:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="boot_traces", n_boot=500)
Show the trace from each sampling unit:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="unit_traces")
"""
# Sort out default values for the parameters
if ax is None:
ax = plt.gca()
if err_kws is None:
err_kws = {}
# Handle different types of input data
if isinstance(data, pd.DataFrame):
xlabel = time
ylabel = value
# Condition is optional
if condition is None:
condition = pd.Series(np.ones(len(data)))
legend = False
legend_name = None
n_cond = 1
else:
legend = True and legend
legend_name = condition
n_cond = len(data[condition].unique())
else:
data = np.asarray(data)
# Data can be a timecourse from a single unit or
# several observations in one condition
if data.ndim == 1:
data = data[np.newaxis, :, np.newaxis]
elif data.ndim == 2:
data = data[:, :, np.newaxis]
n_unit, n_time, n_cond = data.shape
# Units are experimental observations. Maybe subjects, or neurons
if unit is None:
units = np.arange(n_unit)
unit = "unit"
units = np.repeat(units, n_time * n_cond)
ylabel = None
# Time forms the xaxis of the plot
if time is None:
times = np.arange(n_time)
else:
times = np.asarray(time)
xlabel = None
if hasattr(time, "name"):
xlabel = time.name
time = "time"
times = np.tile(np.repeat(times, n_cond), n_unit)
# Conditions split the timeseries plots
if condition is None:
conds = range(n_cond)
legend = False
if isinstance(color, dict):
err = "Must have condition names if using color dict."
raise ValueError(err)
else:
conds = np.asarray(condition)
legend = True and legend
if hasattr(condition, "name"):
legend_name = condition.name
else:
legend_name = None
condition = "cond"
conds = np.tile(conds, n_unit * n_time)
# Value forms the y value in the plot
if value is None:
ylabel = None
else:
ylabel = value
value = "value"
# Convert to long-form DataFrame
data = pd.DataFrame(dict(value=data.ravel(),
time=times,
unit=units,
cond=conds))
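# (At this point the array input has been flattened into the same long-form
# layout as the DataFrame code path: data.ravel() walks the (unit, time,
# condition) cells in C order, and the np.repeat/np.tile calls above keep the
# unit/time/cond label columns aligned with that ordering.)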
# Set up the err_style and ci arguments for the loop below
if isinstance(err_style, string_types):
err_style = [err_style]
elif err_style is None:
err_style = []
if not hasattr(ci, "__iter__"):
ci = [ci]
# Set up the color palette
if color is None:
current_palette = mpl.rcParams["axes.color_cycle"]
if len(current_palette) < n_cond:
colors = color_palette("husl", n_cond)
else:
colors = color_palette(n_colors=n_cond)
elif isinstance(color, dict):
colors = [color[c] for c in data[condition].unique()]
else:
try:
colors = color_palette(color, n_cond)
except ValueError:
color = mpl.colors.colorConverter.to_rgb(color)
colors = [color] * n_cond
# Do a groupby with condition and plot each trace
for c, (cond, df_c) in enumerate(data.groupby(condition, sort=False)):
df_c = df_c.pivot(unit, time, value)
x = df_c.columns.values.astype(np.float)
# Bootstrap the data for confidence intervals
boot_data = algo.bootstrap(df_c.values, n_boot=n_boot,
axis=0, func=estimator)
cis = [utils.ci(boot_data, v, axis=0) for v in ci]
central_data = estimator(df_c.values, axis=0)
# Get the color for this condition
color = colors[c]
# Use subroutines to plot the uncertainty
for style in err_style:
# Allow for null style (only plot central tendency)
if style is None:
continue
# Grab the function from the global environment
try:
plot_func = globals()["_plot_%s" % style]
except KeyError:
raise ValueError("%s is not a valid err_style" % style)
# Possibly set up to plot each observation in a different color
if err_palette is not None and "unit" in style:
orig_color = color
color = color_palette(err_palette, len(df_c.values))
# Pass all parameters to the error plotter as keyword args
plot_kwargs = dict(ax=ax, x=x, data=df_c.values,
boot_data=boot_data,
central_data=central_data,
color=color, err_kws=err_kws)
# Plot the error representation, possibly for multiple cis
for ci_i in cis:
plot_kwargs["ci"] = ci_i
plot_func(**plot_kwargs)
if err_palette is not None and "unit" in style:
color = orig_color
# Plot the central trace
kwargs.setdefault("marker", "" if interpolate else "o")
ls = kwargs.pop("ls", "-" if interpolate else "")
kwargs.setdefault("linestyle", ls)
label = cond if legend else "_nolegend_"
ax.plot(x, central_data, color=color, label=label, **kwargs)
# Pad the sides of the plot only when not interpolating
ax.set_xlim(x.min(), x.max())
x_diff = x[1] - x[0]
if not interpolate:
ax.set_xlim(x.min() - x_diff, x.max() + x_diff)
# Add the plot labels
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if legend:
ax.legend(loc=0, title=legend_name)
return ax
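# Example usage (illustrative sketch only, not part of the module): with a
# long-form DataFrame such as the "gammas" dataset used in the docstring
# above, a per-condition color mapping can be supplied as a dict; the ROI
# labels shown here are placeholders for whatever the condition column holds.
#
#     gammas = sns.load_dataset("gammas")
#     ax = tsplot(time="timepoint", value="BOLD signal", unit="subject",
#                 condition="ROI", data=gammas,
#                 color={"IPS": "b", "AG": "g", "V1": "r"})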
# Subroutines for tsplot errorbar plotting
# ----------------------------------------
def _plot_ci_band(ax, x, ci, color, err_kws, **kwargs):
"""Plot translucent error bands around the central tendancy."""
low, high = ci
if "alpha" not in err_kws:
err_kws["alpha"] = 0.2
ax.fill_between(x, low, high, color=color, **err_kws)
def _plot_ci_bars(ax, x, central_data, ci, color, err_kws, **kwargs):
"""Plot error bars at each data point."""
for x_i, y_i, (low, high) in zip(x, central_data, ci.T):
ax.plot([x_i, x_i], [low, high], color=color,
solid_capstyle="round", **err_kws)
def _plot_boot_traces(ax, x, boot_data, color, err_kws, **kwargs):
"""Plot 250 traces from bootstrap."""
err_kws.setdefault("alpha", 0.25)
err_kws.setdefault("linewidth", 0.25)
if "lw" in err_kws:
err_kws["linewidth"] = err_kws.pop("lw")
ax.plot(x, boot_data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_traces(ax, x, data, ci, color, err_kws, **kwargs):
"""Plot a trace for each observation in the original data."""
if isinstance(color, list):
if "alpha" not in err_kws:
err_kws["alpha"] = .5
for i, obs in enumerate(data):
ax.plot(x, obs, color=color[i], label="_nolegend_", **err_kws)
else:
if "alpha" not in err_kws:
err_kws["alpha"] = .2
ax.plot(x, data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_points(ax, x, data, color, err_kws, **kwargs):
"""Plot each original data point discretely."""
if isinstance(color, list):
for i, obs in enumerate(data):
ax.plot(x, obs, "o", color=color[i], alpha=0.8, markersize=4,
label="_nolegend_", **err_kws)
else:
ax.plot(x, data.T, "o", color=color, alpha=0.5, markersize=4,
label="_nolegend_", **err_kws)
def _plot_boot_kde(ax, x, boot_data, color, **kwargs):
"""Plot the kernal density estimate of the bootstrap distribution."""
kwargs.pop("data")
_ts_kde(ax, x, boot_data, color, **kwargs)
def _plot_unit_kde(ax, x, data, color, **kwargs):
"""Plot the kernal density estimate over the sample."""
_ts_kde(ax, x, data, color, **kwargs)
def _ts_kde(ax, x, data, color, **kwargs):
"""Upsample over time and plot a KDE of the bootstrap distribution."""
kde_data = []
y_min, y_max = data.min(), data.max()
y_vals = np.linspace(y_min, y_max, 100)
upsampler = interpolate.interp1d(x, data)
data_upsample = upsampler(np.linspace(x.min(), x.max(), 100))
for pt_data in data_upsample.T:
pt_kde = stats.kde.gaussian_kde(pt_data)
kde_data.append(pt_kde(y_vals))
kde_data = np.transpose(kde_data)
rgb = mpl.colors.ColorConverter().to_rgb(color)
img = np.zeros((kde_data.shape[0], kde_data.shape[1], 4))
img[:, :, :3] = rgb
kde_data /= kde_data.max(axis=0)
kde_data[kde_data > 1] = 1
img[:, :, 3] = kde_data
ax.imshow(img, interpolation="spline16", zorder=2,
extent=(x.min(), x.max(), y_min, y_max),
aspect="auto", origin="lower")
| bsd-3-clause |
murali-munna/scikit-learn | sklearn/tests/test_grid_search.py | 83 | 28713 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# in order to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
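# Note: GridSearchCV only relies on duck typing -- fit(), predict(),
# get_params()/set_params(), plus either a score() method or an explicit
# ``scoring=`` argument -- which is exactly the surface MockClassifier exposes
# above. A minimal sketch of how it is exercised (mirroring test_grid_search
# below):
#
#     search = GridSearchCV(MockClassifier(), {'foo_param': [1, 2, 3]})
#     search.fit(X, y)
#     search.best_params_   # -> {'foo_param': 2}, the smallest of the tied values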
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
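# For reference, ParameterGrid expands a dict of lists into the full cross
# product of settings, e.g. (up to iteration order):
#
#     list(ParameterGrid({"foo": [1, 2], "bar": ["a"]}))
#     # -> [{'bar': 'a', 'foo': 1}, {'bar': 'a', 'foo': 2}]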
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
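# Hypothetical usage sketch (not part of the original test module): how the
# error_score parameter exercised by the two tests above is typically used in
# user code. With a numeric error_score, folds whose fit fails get that score
# plus a FitFailedWarning; with error_score='raise' the exception propagates.
def _example_error_score_usage(X, y):
    search = GridSearchCV(FailingClassifier(), [{'parameter': [0, 1, 2]}],
                          scoring='accuracy', refit=False, error_score=0.0)
    search.fit(X, y)  # warns (FitFailedWarning) for parameter == 2 instead of raising
    return search.grid_scores_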
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
    # degenerates to GridSearchCV if n_iter is the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
rex-xxx/mt6572_x201 | frameworks/base/tools/velocityplot/velocityplot.py | 9 | 9684 | #!/usr/bin/env python2.6
#
# Copyright (C) 2011 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Plots debug log output from VelocityTracker.
# Enable DEBUG_VELOCITY to print the output.
#
# This code supports side-by-side comparison of two algorithms.
# The old algorithm should be modified to emit debug log messages containing
# the word "OLD".
#
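#
# Hypothetical example of the logcat lines this script parses (the tag, pid and
# values below are made up; the parser only requires a leading
# 'MM-DD HH:MM:SS.mmm' timestamp plus comma-separated 'vx=', 'vy=' and 'speed='
# fields on lines containing ': position' or ': OLD'):
#   01-21 20:42:42.930 D/Input( 1234): VelocityTracker: position (120.0, 640.0), vx=350.2, vy=-80.5, speed=359.4
#   01-21 20:42:42.930 D/Input( 1234): VelocityTracker: OLD vx=340.0, vy=-75.0, speed=348.2
#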
import numpy as np
import matplotlib.pyplot as plot
import subprocess
import re
import fcntl
import os
import errno
import bisect
from datetime import datetime, timedelta
# Parameters.
timespan = 15 # seconds total span shown
scrolljump = 5 # seconds jump when scrolling
timeticks = 1 # seconds between each time tick
# Non-blocking stream wrapper.
class NonBlockingStream:
def __init__(self, stream):
fcntl.fcntl(stream, fcntl.F_SETFL, os.O_NONBLOCK)
self.stream = stream
self.buffer = ''
self.pos = 0
def readline(self):
while True:
index = self.buffer.find('\n', self.pos)
if index != -1:
result = self.buffer[self.pos:index]
self.pos = index + 1
return result
self.buffer = self.buffer[self.pos:]
self.pos = 0
try:
chunk = os.read(self.stream.fileno(), 4096)
except OSError, e:
if e.errno == errno.EAGAIN:
return None
raise e
if len(chunk) == 0:
if len(self.buffer) == 0:
raise(EOFError)
else:
result = self.buffer
self.buffer = ''
self.pos = 0
return result
self.buffer += chunk
# Plotter
class Plotter:
def __init__(self, adbout):
self.adbout = adbout
self.fig = plot.figure(1)
self.fig.suptitle('Velocity Tracker', fontsize=12)
self.fig.set_dpi(96)
self.fig.set_size_inches(16, 12, forward=True)
self.velocity_x = self._make_timeseries()
self.velocity_y = self._make_timeseries()
self.velocity_magnitude = self._make_timeseries()
self.velocity_axes = self._add_timeseries_axes(
1, 'Velocity', 'px/s', [-5000, 5000],
yticks=range(-5000, 5000, 1000))
self.velocity_line_x = self._add_timeseries_line(
self.velocity_axes, 'vx', 'red')
self.velocity_line_y = self._add_timeseries_line(
self.velocity_axes, 'vy', 'green')
self.velocity_line_magnitude = self._add_timeseries_line(
self.velocity_axes, 'magnitude', 'blue')
self._add_timeseries_legend(self.velocity_axes)
shared_axis = self.velocity_axes
self.old_velocity_x = self._make_timeseries()
self.old_velocity_y = self._make_timeseries()
self.old_velocity_magnitude = self._make_timeseries()
self.old_velocity_axes = self._add_timeseries_axes(
2, 'Old Algorithm Velocity', 'px/s', [-5000, 5000],
sharex=shared_axis,
yticks=range(-5000, 5000, 1000))
self.old_velocity_line_x = self._add_timeseries_line(
self.old_velocity_axes, 'vx', 'red')
self.old_velocity_line_y = self._add_timeseries_line(
self.old_velocity_axes, 'vy', 'green')
self.old_velocity_line_magnitude = self._add_timeseries_line(
self.old_velocity_axes, 'magnitude', 'blue')
self._add_timeseries_legend(self.old_velocity_axes)
self.timer = self.fig.canvas.new_timer(interval=100)
self.timer.add_callback(lambda: self.update())
self.timer.start()
self.timebase = None
self._reset_parse_state()
# Initialize a time series.
def _make_timeseries(self):
return [[], []]
# Add a subplot to the figure for a time series.
def _add_timeseries_axes(self, index, title, ylabel, ylim, yticks, sharex=None):
num_graphs = 2
height = 0.9 / num_graphs
top = 0.95 - height * index
axes = self.fig.add_axes([0.1, top, 0.8, height],
xscale='linear',
xlim=[0, timespan],
ylabel=ylabel,
yscale='linear',
ylim=ylim,
sharex=sharex)
axes.text(0.02, 0.02, title, transform=axes.transAxes, fontsize=10, fontweight='bold')
axes.set_xlabel('time (s)', fontsize=10, fontweight='bold')
axes.set_ylabel(ylabel, fontsize=10, fontweight='bold')
axes.set_xticks(range(0, timespan + 1, timeticks))
axes.set_yticks(yticks)
axes.grid(True)
for label in axes.get_xticklabels():
label.set_fontsize(9)
for label in axes.get_yticklabels():
label.set_fontsize(9)
return axes
# Add a line to the axes for a time series.
def _add_timeseries_line(self, axes, label, color, linewidth=1):
return axes.plot([], label=label, color=color, linewidth=linewidth)[0]
# Add a legend to a time series.
def _add_timeseries_legend(self, axes):
axes.legend(
loc='upper left',
bbox_to_anchor=(1.01, 1),
borderpad=0.1,
borderaxespad=0.1,
prop={'size': 10})
# Resets the parse state.
def _reset_parse_state(self):
self.parse_velocity_x = None
self.parse_velocity_y = None
self.parse_velocity_magnitude = None
self.parse_old_velocity_x = None
self.parse_old_velocity_y = None
self.parse_old_velocity_magnitude = None
# Update samples.
def update(self):
timeindex = 0
while True:
try:
line = self.adbout.readline()
except EOFError:
plot.close()
return
if line is None:
break
print line
try:
timestamp = self._parse_timestamp(line)
except ValueError, e:
continue
if self.timebase is None:
self.timebase = timestamp
delta = timestamp - self.timebase
timeindex = delta.seconds + delta.microseconds * 0.000001
if line.find(': position') != -1:
self.parse_velocity_x = self._get_following_number(line, 'vx=')
self.parse_velocity_y = self._get_following_number(line, 'vy=')
self.parse_velocity_magnitude = self._get_following_number(line, 'speed=')
self._append(self.velocity_x, timeindex, self.parse_velocity_x)
self._append(self.velocity_y, timeindex, self.parse_velocity_y)
self._append(self.velocity_magnitude, timeindex, self.parse_velocity_magnitude)
if line.find(': OLD') != -1:
self.parse_old_velocity_x = self._get_following_number(line, 'vx=')
self.parse_old_velocity_y = self._get_following_number(line, 'vy=')
self.parse_old_velocity_magnitude = self._get_following_number(line, 'speed=')
self._append(self.old_velocity_x, timeindex, self.parse_old_velocity_x)
self._append(self.old_velocity_y, timeindex, self.parse_old_velocity_y)
self._append(self.old_velocity_magnitude, timeindex, self.parse_old_velocity_magnitude)
# Scroll the plots.
if timeindex > timespan:
bottom = int(timeindex) - timespan + scrolljump
self.timebase += timedelta(seconds=bottom)
self._scroll(self.velocity_x, bottom)
self._scroll(self.velocity_y, bottom)
self._scroll(self.velocity_magnitude, bottom)
self._scroll(self.old_velocity_x, bottom)
self._scroll(self.old_velocity_y, bottom)
self._scroll(self.old_velocity_magnitude, bottom)
# Redraw the plots.
self.velocity_line_x.set_data(self.velocity_x)
self.velocity_line_y.set_data(self.velocity_y)
self.velocity_line_magnitude.set_data(self.velocity_magnitude)
self.old_velocity_line_x.set_data(self.old_velocity_x)
self.old_velocity_line_y.set_data(self.old_velocity_y)
self.old_velocity_line_magnitude.set_data(self.old_velocity_magnitude)
self.fig.canvas.draw_idle()
# Scroll a time series.
def _scroll(self, timeseries, bottom):
bottom_index = bisect.bisect_left(timeseries[0], bottom)
del timeseries[0][:bottom_index]
del timeseries[1][:bottom_index]
for i, timeindex in enumerate(timeseries[0]):
timeseries[0][i] = timeindex - bottom
# Extract a word following the specified prefix.
def _get_following_word(self, line, prefix):
prefix_index = line.find(prefix)
if prefix_index == -1:
return None
start_index = prefix_index + len(prefix)
delim_index = line.find(',', start_index)
if delim_index == -1:
return line[start_index:]
else:
return line[start_index:delim_index]
# Extract a number following the specified prefix.
def _get_following_number(self, line, prefix):
word = self._get_following_word(line, prefix)
if word is None:
return None
return float(word)
# Add a value to a time series.
def _append(self, timeseries, timeindex, number):
timeseries[0].append(timeindex)
timeseries[1].append(number)
# Parse the logcat timestamp.
# Timestamp has the form '01-21 20:42:42.930'
def _parse_timestamp(self, line):
return datetime.strptime(line[0:18], '%m-%d %H:%M:%S.%f')
# Notice
print "Velocity Tracker plotting tool"
print "-----------------------------------------\n"
print "Please enable debug logging and recompile the code."
# Start adb.
print "Starting adb logcat.\n"
adb = subprocess.Popen(['adb', 'logcat', '-s', '-v', 'time', 'Input:*', 'VelocityTracker:*'],
stdout=subprocess.PIPE)
adbout = NonBlockingStream(adb.stdout)
# Prepare plotter.
plotter = Plotter(adbout)
plotter.update()
# Main loop.
plot.show()
| gpl-2.0 |
ciffcesarhernandez/AF5_PRACTICA3 | prusontchm/ratios.py | 1 | 1844 | def div0( a, b ):
    """Element-wise a / b that ignores division by zero: non-finite results
    (-inf, inf, NaN) are replaced with a.max(), e.g. div0([-1, 0, 1], 0) -> [1, 1, 1]."""
    import numpy as np
with np.errstate(divide='ignore', invalid='ignore'):
c = np.true_divide( a, b )
c[ ~ np.isfinite( c )] = a.max() # -inf inf NaN
return c
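# Hypothetical usage sketch (not part of the original module): shows how div0
# suppresses divide-by-zero warnings and caps non-finite results at a.max().
def _div0_example():
    import numpy as np
    a = np.array([-1.0, 0.0, 1.0])
    b = np.array([0.0, 2.0, 0.0])
    # a / b would be [-inf, 0., inf]; div0 replaces the non-finite entries
    # with a.max() (here 1.0), giving array([1., 0., 1.])
    return div0(a, b)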
def NuevosRatios (df):
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn import decomposition
    # Before computing the ratios, drop the target and id columns.
df.columns = [x.lower() for x in df.columns]
objetivo = [col for col in df.columns if 'target' in col]
objetivo = ''.join(objetivo)
dfBorrar = df[['id', objetivo]]
borrar = ['id', objetivo]
dfaux = df.drop(borrar, axis=1)
numColumnas = len(dfaux.columns)
columnas= dfaux.columns
for ind in range(0,numColumnas) :
for ind2 in range(0,numColumnas) :
if(ind==ind2):
dfaux[columnas[ind]+"^2"] = dfaux[columnas[ind]]**2
else:
dfaux[columnas[ind]+"-"+columnas[ind2]]=dfaux[columnas[ind]]-dfaux[columnas[ind2]]
dfaux[columnas[ind]+"/"+columnas[ind2]] = div0(dfaux[columnas[ind]],dfaux[columnas[ind2]])
dfaux["("+columnas[ind]+"-"+columnas[ind2]+")"+"/"+columnas[ind2]] = div0((dfaux[columnas[ind]]-dfaux[columnas[ind2]]),dfaux[columnas[ind2]])
if ind<ind2:
dfaux[columnas[ind]+"+"+columnas[ind2]]=dfaux[columnas[ind]]+dfaux[columnas[ind2]]
dfaux[columnas[ind]+"*"+columnas[ind2]]=dfaux[columnas[ind]]*dfaux[columnas[ind2]]
list_inputs = dfaux.columns
    # Once the ratios have been computed, add the target and id columns back.
dfVar = pd.concat([dfBorrar, dfaux], axis=1)
return dfVar | gpl-3.0 |
elkingtonmcb/scikit-learn | sklearn/linear_model/bayes.py | 220 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
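# Hypothetical usage sketch (not part of the original module): with
# compute_score=True the objective value at each iteration is collected in
# `scores_`, alongside the learned noise precision `alpha_` and weight
# precision `lambda_`. The toy data below is made up.
def _example_bayesian_ridge_scores():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = np.dot(X, np.array([1.0, 0.0, -2.0])) + 0.1 * rng.randn(50)
    model = BayesianRidge(compute_score=True).fit(X, y)
    return model.alpha_, model.lambda_, model.scores_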
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
| bsd-3-clause |
ocefpaf/cartopy | lib/cartopy/examples/favicon.py | 4 | 1684 | """
Cartopy Favicon
---------------
The actual code to generate cartopy's favicon.
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import matplotlib.textpath
import matplotlib.patches
from matplotlib.font_manager import FontProperties
import numpy as np
def main():
fig = plt.figure(figsize=[8, 8])
ax = fig.add_subplot(1, 1, 1, projection=ccrs.SouthPolarStereo())
ax.coastlines()
ax.gridlines()
im = ax.stock_img()
def on_draw(event=None):
"""
Hook into matplotlib's event mechanism to define the clip path of the
background image.
"""
# Clip the image to the current background boundary.
im.set_clip_path(ax.background_patch.get_path(),
transform=ax.background_patch.get_transform())
# Register the on_draw method and call it once now.
fig.canvas.mpl_connect('draw_event', on_draw)
on_draw()
# Generate a matplotlib path representing the character "C".
fp = FontProperties(family='Bitstream Vera Sans', weight='bold')
logo_path = matplotlib.textpath.TextPath((-4.5e7, -3.7e7),
'C', size=1, prop=fp)
# Scale the letter up to an appropriate X and Y scale.
logo_path._vertices *= np.array([103250000, 103250000])
# Add the path as a patch, drawing black outlines around the text.
patch = matplotlib.patches.PathPatch(logo_path, facecolor='white',
edgecolor='black', linewidth=10,
transform=ccrs.SouthPolarStereo())
ax.add_patch(patch)
plt.show()
if __name__ == '__main__':
main()
| lgpl-3.0 |
zfrenchee/pandas | pandas/tests/series/test_api.py | 1 | 27723 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from collections import OrderedDict
import pytest
import numpy as np
import pandas as pd
from pandas import Index, Series, DataFrame, date_range
from pandas.core.indexes.datetimes import Timestamp
from pandas.compat import range, lzip, isidentifier, string_types
from pandas import (compat, Categorical, period_range, timedelta_range,
DatetimeIndex, PeriodIndex, TimedeltaIndex)
import pandas.io.formats.printing as printing
from pandas.util.testing import (assert_series_equal,
ensure_clean)
import pandas.util.testing as tm
from .common import TestData
class SharedWithSparse(object):
"""
A collection of tests Series and SparseSeries can share.
In generic tests on this class, use ``self._assert_series_equal()``
which is implemented in sub-classes.
"""
def _assert_series_equal(self, left, right):
"""Dispatch to series class dependent assertion"""
raise NotImplementedError
def test_scalarop_preserve_name(self):
result = self.ts * 2
assert result.name == self.ts.name
def test_copy_name(self):
result = self.ts.copy()
assert result.name == self.ts.name
def test_copy_index_name_checking(self):
# don't want to be able to modify the index stored elsewhere after
# making a copy
self.ts.index.name = None
assert self.ts.index.name is None
assert self.ts is self.ts
cp = self.ts.copy()
cp.index.name = 'foo'
printing.pprint_thing(self.ts.index.name)
assert self.ts.index.name is None
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
assert result.name == self.ts.name
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
assert result.name == self.ts.name
result = self.ts.mul(self.ts)
assert result.name == self.ts.name
result = self.ts * self.ts[:-2]
assert result.name == self.ts.name
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
assert result.name is None
result = self.ts.add(cp)
assert result.name is None
ops = ['add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow']
ops = ops + ['r' + op for op in ops]
for op in ops:
# names match, preserve
s = self.ts.copy()
result = getattr(s, op)(s)
assert result.name == self.ts.name
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'changed'
result = getattr(s, op)(cp)
assert result.name is None
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
assert result.name == self.ts.name
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
assert result.name == self.ts.name
result = self.ts[[0, 2, 4]]
assert result.name == self.ts.name
result = self.ts[5:10]
assert result.name == self.ts.name
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
with ensure_clean() as path:
obj.to_pickle(path)
unpickled = pd.read_pickle(path)
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
assert result.name == self.ts.name
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
assert result.name == self.ts.name
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
assert result.name == self.ts.name
def test_constructor_dict(self):
d = {'a': 0., 'b': 1., 'c': 2.}
result = self.series_klass(d)
expected = self.series_klass(d, index=sorted(d.keys()))
self._assert_series_equal(result, expected)
result = self.series_klass(d, index=['b', 'c', 'd', 'a'])
expected = self.series_klass([1, 2, np.nan, 0],
index=['b', 'c', 'd', 'a'])
self._assert_series_equal(result, expected)
def test_constructor_subclass_dict(self):
data = tm.TestSubDict((x, 10.0 * x) for x in range(10))
series = self.series_klass(data)
expected = self.series_klass(dict(compat.iteritems(data)))
self._assert_series_equal(series, expected)
def test_constructor_ordereddict(self):
# GH3283
data = OrderedDict(
('col%s' % i, np.random.random()) for i in range(12))
series = self.series_klass(data)
expected = self.series_klass(list(data.values()), list(data.keys()))
self._assert_series_equal(series, expected)
# Test with subclass
class A(OrderedDict):
pass
series = self.series_klass(A(data))
self._assert_series_equal(series, expected)
def test_constructor_dict_multiindex(self):
d = {('a', 'a'): 0., ('b', 'a'): 1., ('b', 'c'): 2.}
_d = sorted(d.items())
result = self.series_klass(d)
expected = self.series_klass(
[x[1] for x in _d],
index=pd.MultiIndex.from_tuples([x[0] for x in _d]))
self._assert_series_equal(result, expected)
d['z'] = 111.
_d.insert(0, ('z', d['z']))
result = self.series_klass(d)
expected = self.series_klass([x[1] for x in _d],
index=pd.Index([x[0] for x in _d],
tupleize_cols=False))
result = result.reindex(index=expected.index)
self._assert_series_equal(result, expected)
def test_constructor_dict_timedelta_index(self):
# GH #12169 : Resample category data with timedelta index
# construct Series from dict as data and TimedeltaIndex as index
# will result NaN in result Series data
expected = self.series_klass(
data=['A', 'B', 'C'],
index=pd.to_timedelta([0, 10, 20], unit='s')
)
result = self.series_klass(
data={pd.to_timedelta(0, unit='s'): 'A',
pd.to_timedelta(10, unit='s'): 'B',
pd.to_timedelta(20, unit='s'): 'C'},
index=pd.to_timedelta([0, 10, 20], unit='s')
)
self._assert_series_equal(result, expected)
def test_from_array_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.series_klass.from_array([1, 2, 3])
class TestSeriesMisc(TestData, SharedWithSparse):
series_klass = Series
# SharedWithSparse tests use generic, series_klass-agnostic assertion
_assert_series_equal = staticmethod(tm.assert_series_equal)
def test_tab_completion(self):
# GH 9910
s = Series(list('abcd'))
# Series of str values should have .str but not .dt/.cat in __dir__
assert 'str' in dir(s)
assert 'dt' not in dir(s)
assert 'cat' not in dir(s)
# similarly for .dt
s = Series(date_range('1/1/2015', periods=5))
assert 'dt' in dir(s)
assert 'str' not in dir(s)
assert 'cat' not in dir(s)
        # Similarly for .cat, but with the twist that str or dt should also be
        # there if the categories are of that type; first check cat and str.
s = Series(list('abbcd'), dtype="category")
assert 'cat' in dir(s)
assert 'str' in dir(s) # as it is a string categorical
assert 'dt' not in dir(s)
# similar to cat and str
s = Series(date_range('1/1/2015', periods=5)).astype("category")
assert 'cat' in dir(s)
assert 'str' not in dir(s)
assert 'dt' in dir(s) # as it is a datetime categorical
def test_tab_completion_with_categorical(self):
# test the tab completion display
ok_for_cat = ['categories', 'codes', 'ordered', 'set_categories',
'add_categories', 'remove_categories',
'rename_categories', 'reorder_categories',
'remove_unused_categories', 'as_ordered', 'as_unordered']
def get_dir(s):
results = [r for r in s.cat.__dir__() if not r.startswith('_')]
return list(sorted(set(results)))
s = Series(list('aabbcde')).astype('category')
results = get_dir(s)
tm.assert_almost_equal(results, list(sorted(set(ok_for_cat))))
@pytest.mark.parametrize("index", [
tm.makeUnicodeIndex(10),
tm.makeStringIndex(10),
tm.makeCategoricalIndex(10),
Index(['foo', 'bar', 'baz'] * 2),
tm.makeDateIndex(10),
tm.makePeriodIndex(10),
tm.makeTimedeltaIndex(10),
tm.makeIntIndex(10),
tm.makeUIntIndex(10),
tm.makeIntIndex(10),
tm.makeFloatIndex(10),
Index([True, False]),
Index(['a{}'.format(i) for i in range(101)]),
pd.MultiIndex.from_tuples(lzip('ABCD', 'EFGH')),
pd.MultiIndex.from_tuples(lzip([0, 1, 2, 3], 'EFGH')), ])
def test_index_tab_completion(self, index):
# dir contains string-like values of the Index.
s = pd.Series(index=index)
dir_s = dir(s)
for i, x in enumerate(s.index.unique(level=0)):
if i < 100:
assert (not isinstance(x, string_types) or
not isidentifier(x) or x in dir_s)
else:
assert x not in dir_s
def test_not_hashable(self):
s_empty = Series()
s = Series([1])
pytest.raises(TypeError, hash, s_empty)
pytest.raises(TypeError, hash, s)
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_iter(self):
for i, val in enumerate(self.series):
assert val == self.series[i]
for i, val in enumerate(self.ts):
assert val == self.ts[i]
def test_keys(self):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = self.ts.keys
assert getkeys() is self.ts.index
def test_values(self):
tm.assert_almost_equal(self.ts.values, self.ts, check_dtype=False)
def test_iteritems(self):
for idx, val in compat.iteritems(self.series):
assert val == self.series[idx]
for idx, val in compat.iteritems(self.ts):
assert val == self.ts[idx]
        # assert iteritems() is lazy (generators don't define reverse, lists do)
assert not hasattr(self.series.iteritems(), 'reverse')
def test_items(self):
for idx, val in self.series.items():
assert val == self.series[idx]
for idx, val in self.ts.items():
assert val == self.ts[idx]
        # assert items() is lazy (generators don't define reverse, lists do)
assert not hasattr(self.series.items(), 'reverse')
def test_raise_on_info(self):
s = Series(np.random.randn(10))
with pytest.raises(AttributeError):
s.info()
def test_copy(self):
for deep in [None, False, True]:
s = Series(np.arange(10), dtype='float64')
# default deep is True
if deep is None:
s2 = s.copy()
else:
s2 = s.copy(deep=deep)
s2[::2] = np.NaN
if deep is None or deep is True:
# Did not modify original Series
assert np.isnan(s2[0])
assert not np.isnan(s[0])
else:
# we DID modify the original Series
assert np.isnan(s2[0])
assert np.isnan(s[0])
# GH 11794
# copy of tz-aware
expected = Series([Timestamp('2012/01/01', tz='UTC')])
expected2 = Series([Timestamp('1999/01/01', tz='UTC')])
for deep in [None, False, True]:
s = Series([Timestamp('2012/01/01', tz='UTC')])
if deep is None:
s2 = s.copy()
else:
s2 = s.copy(deep=deep)
s2[0] = pd.Timestamp('1999/01/01', tz='UTC')
# default deep is True
if deep is None or deep is True:
# Did not modify original Series
assert_series_equal(s2, expected2)
assert_series_equal(s, expected)
else:
# we DID modify the original Series
assert_series_equal(s2, expected2)
assert_series_equal(s, expected2)
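    # Hypothetical companion sketch (not part of the original test module):
    # the sharing behaviour exercised above, stated the other way around --
    # a write made through a shallow copy is visible from the original Series.
    def _example_shallow_copy_shares_data(self):
        s = Series([1., 2., 3.])
        shallow = s.copy(deep=False)
        shallow[0] = np.nan
        assert np.isnan(s[0])  # original sees the write made via the copy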
def test_axis_alias(self):
s = Series([1, 2, np.nan])
assert_series_equal(s.dropna(axis='rows'), s.dropna(axis='index'))
assert s.dropna().sum('rows') == 3
assert s._get_axis_number('rows') == 0
assert s._get_axis_name('rows') == 'index'
def test_class_axis(self):
# https://github.com/pandas-dev/pandas/issues/18147
Series.index # no exception!
def test_numpy_unique(self):
# it works!
np.unique(self.ts)
def test_ndarray_compat(self):
# test numpy compat with Series as sub-class of NDFrame
tsdf = DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'],
index=date_range('1/1/2000', periods=1000))
def f(x):
return x[x.idxmax()]
result = tsdf.apply(f)
expected = tsdf.max()
tm.assert_series_equal(result, expected)
# .item()
s = Series([1])
result = s.item()
assert result == 1
assert s.item() == s.iloc[0]
# using an ndarray like function
s = Series(np.random.randn(10))
result = Series(np.ones_like(s))
expected = Series(1, index=range(10), dtype='float64')
tm.assert_series_equal(result, expected)
# ravel
s = Series(np.random.randn(10))
tm.assert_almost_equal(s.ravel(order='F'), s.values.ravel(order='F'))
# compress
# GH 6658
s = Series([0, 1., -1], index=list('abc'))
result = np.compress(s > 0, s)
tm.assert_series_equal(result, Series([1.], index=['b']))
result = np.compress(s < -1, s)
# result empty Index(dtype=object) as the same as original
exp = Series([], dtype='float64', index=Index([], dtype='object'))
tm.assert_series_equal(result, exp)
s = Series([0, 1., -1], index=[.1, .2, .3])
result = np.compress(s > 0, s)
tm.assert_series_equal(result, Series([1.], index=[.2]))
result = np.compress(s < -1, s)
# result empty Float64Index as the same as original
exp = Series([], dtype='float64', index=Index([], dtype='float64'))
tm.assert_series_equal(result, exp)
def test_str_attribute(self):
# GH9068
methods = ['strip', 'rstrip', 'lstrip']
s = Series([' jack', 'jill ', ' jesse ', 'frank'])
for method in methods:
expected = Series([getattr(str, method)(x) for x in s.values])
assert_series_equal(getattr(Series.str, method)(s.str), expected)
# str accessor only valid with string values
s = Series(range(5))
with tm.assert_raises_regex(AttributeError,
'only use .str accessor'):
s.str.repeat(2)
def test_empty_method(self):
s_empty = pd.Series()
assert s_empty.empty
for full_series in [pd.Series([1]), pd.Series(index=[1])]:
assert not full_series.empty
def test_tab_complete_warning(self, ip):
# https://github.com/pandas-dev/pandas/issues/16409
pytest.importorskip('IPython', minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; s = pd.Series()"
ip.run_code(code)
with tm.assert_produces_warning(None):
with provisionalcompleter('ignore'):
list(ip.Completer.completions('s.', 1))
class TestCategoricalSeries(object):
@pytest.mark.parametrize(
"method",
[
lambda x: x.cat.set_categories([1, 2, 3]),
lambda x: x.cat.reorder_categories([2, 3, 1], ordered=True),
lambda x: x.cat.rename_categories([1, 2, 3]),
lambda x: x.cat.remove_unused_categories(),
lambda x: x.cat.remove_categories([2]),
lambda x: x.cat.add_categories([4]),
lambda x: x.cat.as_ordered(),
lambda x: x.cat.as_unordered(),
])
def test_getname_categorical_accessor(self, method):
# GH 17509
s = Series([1, 2, 3], name='A').astype('category')
expected = 'A'
result = method(s).name
assert result == expected
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
        assert not s.cat.ordered
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
tm.assert_categorical_equal(s.values, exp)
res = s.cat.set_categories(["b", "a"])
tm.assert_categorical_equal(res.values, exp)
s[:] = "a"
s = s.cat.remove_unused_categories()
tm.assert_index_equal(s.cat.categories, Index(["a"]))
def test_cat_accessor_api(self):
# GH 9322
from pandas.core.categorical import CategoricalAccessor
assert Series.cat is CategoricalAccessor
s = Series(list('aabbcde')).astype('category')
assert isinstance(s.cat, CategoricalAccessor)
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .cat accessor"):
invalid.cat
assert not hasattr(invalid, 'cat')
def test_cat_accessor_no_new_attributes(self):
# https://github.com/pandas-dev/pandas/issues/10673
c = Series(list('aabbcde')).astype('category')
with tm.assert_raises_regex(AttributeError,
"You cannot add any new attribute"):
c.cat.xlabel = "a"
def test_categorical_delegations(self):
# invalid accessor
pytest.raises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assert_raises_regex(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
pytest.raises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
pytest.raises(AttributeError, lambda: Series(np.arange(5.)).cat)
pytest.raises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
        # categorical.
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = Index(["a", "b", "c"])
tm.assert_index_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = Index([1, 2, 3])
tm.assert_index_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
assert s.cat.ordered
s = s.cat.as_unordered()
assert not s.cat.ordered
s.cat.as_ordered(inplace=True)
assert s.cat.ordered
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = Index(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
s = s.cat.set_categories(["c", "b", "a"])
tm.assert_index_equal(s.cat.categories, exp_categories)
tm.assert_numpy_array_equal(s.values.__array__(), exp_values)
tm.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = Index(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"], dtype=np.object_)
s = s.cat.remove_unused_categories()
tm.assert_index_equal(s.cat.categories, exp_categories)
tm.assert_numpy_array_equal(s.values.__array__(), exp_values)
tm.assert_numpy_array_equal(s.__array__(), exp_values)
# This method is likely to be confused, so test that it raises an error
# on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
pytest.raises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
# GH18862 (let Series.cat.rename_categories take callables)
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
result = s.cat.rename_categories(lambda x: x.upper())
expected = Series(Categorical(["A", "B", "C", "A"],
categories=["A", "B", "C"],
ordered=True))
tm.assert_series_equal(result, expected)
def test_str_accessor_api_for_categorical(self):
# https://github.com/pandas-dev/pandas/issues/10661
from pandas.core.strings import StringMethods
s = Series(list('aabb'))
s = s + " " + s
c = s.astype('category')
assert isinstance(c.str, StringMethods)
# str functions, which need special arguments
special_func_defs = [
('cat', (list("zyxw"),), {"sep": ","}),
('center', (10,), {}),
('contains', ("a",), {}),
('count', ("a",), {}),
('decode', ("UTF-8",), {}),
('encode', ("UTF-8",), {}),
('endswith', ("a",), {}),
('extract', ("([a-z]*) ",), {"expand": False}),
('extract', ("([a-z]*) ",), {"expand": True}),
('extractall', ("([a-z]*) ",), {}),
('find', ("a",), {}),
('findall', ("a",), {}),
('index', (" ",), {}),
('ljust', (10,), {}),
('match', ("a"), {}), # deprecated...
('normalize', ("NFC",), {}),
('pad', (10,), {}),
('partition', (" ",), {"expand": False}), # not default
('partition', (" ",), {"expand": True}), # default
('repeat', (3,), {}),
('replace', ("a", "z"), {}),
('rfind', ("a",), {}),
('rindex', (" ",), {}),
('rjust', (10,), {}),
('rpartition', (" ",), {"expand": False}), # not default
('rpartition', (" ",), {"expand": True}), # default
('slice', (0, 1), {}),
('slice_replace', (0, 1, "z"), {}),
('split', (" ",), {"expand": False}), # default
('split', (" ",), {"expand": True}), # not default
('startswith', ("a",), {}),
('wrap', (2,), {}),
('zfill', (10,), {})
]
_special_func_names = [f[0] for f in special_func_defs]
        # * get, join: they need individual elements of type list, but
# we can't make a categorical with lists as individual categories.
# -> `s.str.split(" ").astype("category")` will error!
# * `translate` has different interfaces for py2 vs. py3
_ignore_names = ["get", "join", "translate"]
str_func_names = [f for f in dir(s.str) if not (
f.startswith("_") or
f in _special_func_names or
f in _ignore_names)]
func_defs = [(f, (), {}) for f in str_func_names]
func_defs.extend(special_func_defs)
for func, args, kwargs in func_defs:
res = getattr(c.str, func)(*args, **kwargs)
exp = getattr(s.str, func)(*args, **kwargs)
if isinstance(res, DataFrame):
tm.assert_frame_equal(res, exp)
else:
tm.assert_series_equal(res, exp)
invalid = Series([1, 2, 3]).astype('category')
with tm.assert_raises_regex(AttributeError,
"Can only use .str "
"accessor with string"):
invalid.str
assert not hasattr(invalid, 'str')
def test_dt_accessor_api_for_categorical(self):
# https://github.com/pandas-dev/pandas/issues/10661
from pandas.core.indexes.accessors import Properties
s_dr = Series(date_range('1/1/2015', periods=5, tz="MET"))
c_dr = s_dr.astype("category")
s_pr = Series(period_range('1/1/2015', freq='D', periods=5))
c_pr = s_pr.astype("category")
s_tdr = Series(timedelta_range('1 days', '10 days'))
c_tdr = s_tdr.astype("category")
# only testing field (like .day)
# and bool (is_month_start)
get_ops = lambda x: x._datetimelike_ops
test_data = [
("Datetime", get_ops(DatetimeIndex), s_dr, c_dr),
("Period", get_ops(PeriodIndex), s_pr, c_pr),
("Timedelta", get_ops(TimedeltaIndex), s_tdr, c_tdr)]
assert isinstance(c_dr.dt, Properties)
special_func_defs = [
('strftime', ("%Y-%m-%d",), {}),
('tz_convert', ("EST",), {}),
('round', ("D",), {}),
('floor', ("D",), {}),
('ceil', ("D",), {}),
('asfreq', ("D",), {}),
# ('tz_localize', ("UTC",), {}),
]
_special_func_names = [f[0] for f in special_func_defs]
# the series is already localized
_ignore_names = ['tz_localize', 'components']
for name, attr_names, s, c in test_data:
func_names = [f
for f in dir(s.dt)
if not (f.startswith("_") or f in attr_names or f in
_special_func_names or f in _ignore_names)]
func_defs = [(f, (), {}) for f in func_names]
for f_def in special_func_defs:
if f_def[0] in dir(s.dt):
func_defs.append(f_def)
for func, args, kwargs in func_defs:
res = getattr(c.dt, func)(*args, **kwargs)
exp = getattr(s.dt, func)(*args, **kwargs)
if isinstance(res, DataFrame):
tm.assert_frame_equal(res, exp)
elif isinstance(res, Series):
tm.assert_series_equal(res, exp)
else:
tm.assert_almost_equal(res, exp)
for attr in attr_names:
try:
res = getattr(c.dt, attr)
exp = getattr(s.dt, attr)
except Exception as e:
print(name, attr)
raise e
if isinstance(res, DataFrame):
tm.assert_frame_equal(res, exp)
elif isinstance(res, Series):
tm.assert_series_equal(res, exp)
else:
tm.assert_almost_equal(res, exp)
invalid = Series([1, 2, 3]).astype('category')
with tm.assert_raises_regex(
AttributeError, "Can only use .dt accessor with datetimelike"):
invalid.dt
assert not hasattr(invalid, 'str')
| bsd-3-clause |
mfjb/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
soylentdeen/CIAO-commissioning-tools | sandbox/TTFocusPlotter.py | 1 | 1425 | import scipy
import matplotlib.pyplot as pyplot
import VLTTools
import numpy
import time
datadir = "/diska/data/SPARTA/2015-04-29/TTFocus/"
ciao = VLTTools.VLTConnection(simulate=False, datapath=datadir)
fig = pyplot.figure(0)
fig.clear()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
Tip = []
Tilt = []
Focus = []
length = 120
tip = numpy.zeros(length)
tilt = numpy.zeros(length)
focus = numpy.zeros(length)
tipPlot, = ax.plot(tip, c='b')
tiltPlot, = ax.plot(tilt, c='g')
focusPlot, = ax.plot(focus, c='r')
fig.show()
pix2m = (3.14159/(180.0*3600.0)) * 8.0*0.5/2.0
n = 13.49
while True:
TTF = ciao.getTTFocus() * pix2m
tip = numpy.append(tip,TTF[0]*4*n*1000.0)
tilt = numpy.append(tilt, TTF[2]*4*n*1000.0)
focus = numpy.append(focus, TTF[4]*16*(3.0)**(0.5)*n**2.0*1000.0)
tipPlot.set_ydata(tip[-length:])
tiltPlot.set_ydata(tilt[-length:])
focusPlot.set_ydata(focus[-length:])
mn = numpy.min([numpy.min(tipPlot.get_data()[1]), numpy.min(tiltPlot.get_data()[1]), numpy.min(focusPlot.get_data()[1])])
mx = numpy.max([numpy.max(tipPlot.get_data()[1]), numpy.max(tiltPlot.get_data()[1]), numpy.max(focusPlot.get_data()[1])])
ax.set_ybound(lower=mn*1.2, upper=mx*1.2)
fig.canvas.draw()
TTF *= 1e9
# print("focus: %.4f, tip: %.4f, tilt: %.4f" % (focus[-1], tip[-1], tilt[-1]))
print("focus: %.0f, tip: %.0f, tilt: %.0f" % (TTF[4], TTF[0], TTF[2]))
time.sleep(1)
| mit |
runawayhorse001/LearningApacheSpark | pyspark/sql/utils.py | 14 | 6894 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import py4j
class CapturedException(Exception):
def __init__(self, desc, stackTrace):
self.desc = desc
self.stackTrace = stackTrace
def __str__(self):
return repr(self.desc)
class AnalysisException(CapturedException):
"""
Failed to analyze a SQL query plan.
"""
class ParseException(CapturedException):
"""
Failed to parse a SQL command.
"""
class IllegalArgumentException(CapturedException):
"""
Passed an illegal or inappropriate argument.
"""
class StreamingQueryException(CapturedException):
"""
Exception that stopped a :class:`StreamingQuery`.
"""
class QueryExecutionException(CapturedException):
"""
Failed to execute a query.
"""
def capture_sql_exception(f):
def deco(*a, **kw):
try:
return f(*a, **kw)
except py4j.protocol.Py4JJavaError as e:
s = e.java_exception.toString()
stackTrace = '\n\t at '.join(map(lambda x: x.toString(),
e.java_exception.getStackTrace()))
if s.startswith('org.apache.spark.sql.AnalysisException: '):
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.catalyst.analysis'):
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.catalyst.parser.ParseException: '):
raise ParseException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.streaming.StreamingQueryException: '):
raise StreamingQueryException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.execution.QueryExecutionException: '):
raise QueryExecutionException(s.split(': ', 1)[1], stackTrace)
if s.startswith('java.lang.IllegalArgumentException: '):
raise IllegalArgumentException(s.split(': ', 1)[1], stackTrace)
raise
return deco
def install_exception_handler():
"""
Hook an exception handler into Py4j, which could capture some SQL exceptions in Java.
    When calling the Java API, Py4J calls `get_return_value` to parse the returned object.
    If an exception happened in the JVM, the result is a Java exception object and a
    py4j.protocol.Py4JJavaError is raised. We replace the original `get_return_value` with
    one that captures the Java exception and raises a Python one (with the same error message).
    It is idempotent and can be called multiple times.
"""
original = py4j.protocol.get_return_value
# The original `get_return_value` is not patched, it's idempotent.
patched = capture_sql_exception(original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
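# Hypothetical illustration (not part of the original module): once
# install_exception_handler() has been called, a bad column reference surfaces
# as the Python AnalysisException defined above rather than a raw
# py4j.protocol.Py4JJavaError. The column name below is made up.
def _example_capture_analysis_exception(spark):
    install_exception_handler()
    try:
        spark.range(3).select("no_such_column").collect()
    except AnalysisException as e:
        # desc carries the Java-side message, stackTrace the Java stack trace
        return e.desc, e.stackTrace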
def toJArray(gateway, jtype, arr):
"""
Convert python list to java type array
:param gateway: Py4j Gateway
:param jtype: java type of element in array
:param arr: python type list
"""
jarr = gateway.new_array(jtype, len(arr))
for i in range(0, len(arr)):
jarr[i] = arr[i]
return jarr
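# Hypothetical usage sketch (not part of the original module): building a
# java.lang.String[] from a Python list through the Py4J gateway attached to
# an active SparkContext (`sc`).
def _example_to_jarray(sc, values):
    gateway = sc._gateway
    return toJArray(gateway, gateway.jvm.java.lang.String, values)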
def require_minimum_pandas_version():
""" Raise ImportError if minimum version of Pandas is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pandas_version = "0.19.2"
from distutils.version import LooseVersion
try:
import pandas
have_pandas = True
except ImportError:
have_pandas = False
if not have_pandas:
raise ImportError("Pandas >= %s must be installed; however, "
"it was not found." % minimum_pandas_version)
if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
raise ImportError("Pandas >= %s must be installed; however, "
"your version was %s." % (minimum_pandas_version, pandas.__version__))
def require_minimum_pyarrow_version():
""" Raise ImportError if minimum version of pyarrow is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pyarrow_version = "0.8.0"
from distutils.version import LooseVersion
try:
import pyarrow
have_arrow = True
except ImportError:
have_arrow = False
if not have_arrow:
raise ImportError("PyArrow >= %s must be installed; however, "
"it was not found." % minimum_pyarrow_version)
if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
raise ImportError("PyArrow >= %s must be installed; however, "
"your version was %s." % (minimum_pyarrow_version, pyarrow.__version__))
def require_test_compiled():
""" Raise Exception if test classes are not compiled
"""
import os
import glob
try:
spark_home = os.environ['SPARK_HOME']
except KeyError:
raise RuntimeError('SPARK_HOME is not defined in environment')
test_class_path = os.path.join(
spark_home, 'sql', 'core', 'target', '*', 'test-classes')
paths = glob.glob(test_class_path)
if len(paths) == 0:
raise RuntimeError(
"%s doesn't exist. Spark sql test classes are not compiled." % test_class_path)
class ForeachBatchFunction(object):
"""
This is the Python implementation of Java interface 'ForeachBatchFunction'. This wraps
the user-defined 'foreachBatch' function such that it can be called from the JVM when
the query is active.
"""
def __init__(self, sql_ctx, func):
self.sql_ctx = sql_ctx
self.func = func
def call(self, jdf, batch_id):
from pyspark.sql.dataframe import DataFrame
try:
self.func(DataFrame(jdf, self.sql_ctx), batch_id)
except Exception as e:
self.error = e
raise e
class Java:
implements = ['org.apache.spark.sql.execution.streaming.sources.PythonForeachBatchFunction']
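# Illustrative sketch of the user-facing side (hypothetical user code, not part
# of this module): ForeachBatchFunction is the JVM-facing wrapper behind
# DataStreamWriter.foreachBatch. The source, sink and paths below are
# placeholders.
#
#     def process_batch(batch_df, batch_id):
#         batch_df.write.format("parquet").mode("append").save("/tmp/out")
#
#     query = (spark.readStream.format("rate").load()
#              .writeStream.foreachBatch(process_batch).start())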
| mit |
aabadie/scikit-learn | sklearn/neighbors/nearest_centroid.py | 34 | 7347 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
from ..utils.multiclass import check_classification_targets
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
metric: string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
The centroid for the samples corresponding to each class is the point
from which the sum of the distances (according to the metric) of all
samples that belong to that particular class is minimized.
If the "manhattan" metric is provided, this centroid is the median;
for all other metrics, the centroid is set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
# If X is sparse and the metric is "manhattan", store it in csc
# format, which makes it easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
check_classification_targets(y)
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
# Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of samples in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
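# Illustrative sketch (not part of the scikit-learn source): with a non-zero
# shrink_threshold, features that do not separate the classes are shrunk toward
# the overall centroid. The toy data below is made up.
#
#     import numpy as np
#     from sklearn.neighbors import NearestCentroid
#     X = np.array([[0.0, 5.0], [0.1, 4.0], [3.0, 5.1], [3.1, 4.2]])
#     y = np.array([0, 0, 1, 1])
#     clf = NearestCentroid(shrink_threshold=0.5).fit(X, y)
#     print(clf.centroids_)  # the second (non-discriminative) feature moves toward the dataset mean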
| bsd-3-clause |
xuewei4d/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 17 | 3334 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1, L2 and Elastic-Net penalty are used for different values of C. We can see
that large values of C give more freedom to the model. Conversely, smaller
values of C constrain the model more. In the L1 penalty case, this leads to
sparser solutions. As expected, the Elastic-Net penalty sparsity is between
that of L1 and L2.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
X, y = datasets.load_digits(return_X_y=True)
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(int)
l1_ratio = 0.5 # L1 weight in the Elastic-Net regularization
fig, axes = plt.subplots(3, 3)
# Set regularization parameter
for i, (C, axes_row) in enumerate(zip((1, 0.1, 0.01), axes)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01, solver='saga')
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01, solver='saga')
clf_en_LR = LogisticRegression(C=C, penalty='elasticnet', solver='saga',
l1_ratio=l1_ratio, tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
clf_en_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
coef_en_LR = clf_en_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
sparsity_en_LR = np.mean(coef_en_LR == 0) * 100
print("C=%.2f" % C)
print("{:<40} {:.2f}%".format("Sparsity with L1 penalty:", sparsity_l1_LR))
print("{:<40} {:.2f}%".format("Sparsity with Elastic-Net penalty:",
sparsity_en_LR))
print("{:<40} {:.2f}%".format("Sparsity with L2 penalty:", sparsity_l2_LR))
print("{:<40} {:.2f}".format("Score with L1 penalty:",
clf_l1_LR.score(X, y)))
print("{:<40} {:.2f}".format("Score with Elastic-Net penalty:",
clf_en_LR.score(X, y)))
print("{:<40} {:.2f}".format("Score with L2 penalty:",
clf_l2_LR.score(X, y)))
if i == 0:
axes_row[0].set_title("L1 penalty")
axes_row[1].set_title("Elastic-Net\nl1_ratio = %s" % l1_ratio)
axes_row[2].set_title("L2 penalty")
for ax, coefs in zip(axes_row, [coef_l1_LR, coef_en_LR, coef_l2_LR]):
ax.imshow(np.abs(coefs.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
ax.set_xticks(())
ax.set_yticks(())
axes_row[0].set_ylabel('C = %s' % C)
plt.show()
| bsd-3-clause |
frucci/kaggle_quora_competition | get_phrase_correction.py | 1 | 5953 | import ourfunctions as f
import gc
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import nltk
import re
from string import punctuation
train_df = pd.read_csv("./train.csv")
test_df = pd.read_csv("./test.csv")
print(train_df.shape)
print(test_df.shape)
## adjusting the nan value
train_df.fillna("", inplace=True)
test_df.fillna("", inplace=True)
train_df.info()
import enchant
from enchant.checker import SpellChecker
d = enchant.DictWithPWL("en_US")
chkr = SpellChecker(d)
def create_unique_questions(train_df, test_df):
words = list(
set(
list(set(train_df['question1'])) +
list(set(train_df['question2'])) + list(set(test_df['question1']))
+ list(set(test_df['question2']))))
unique_questions = pd.DataFrame(
words, columns=['questions']).reset_index(drop=True)
return unique_questions
unique_questions = create_unique_questions(train_df, test_df)
alreary_corrected = [
"iphones",
"ireland",
"mustn",
"linux",
"wouldn",
"videoes",
"tv",
"google",
"memorise",
"faught",
"cena",
"lesner",
"hollywood",
"anmount",
"hoywood",
"cantonese",
"otherthan",
"mumbai",
"wikipedia",
"textfields",
"ajax",
"pls",
"couldn",
"calcutta",
"doesnt",
"fIght",
"txt",
"whther",
"feelns",
"sudd",
"stl",
"india",
"Plz",
"engg",
"eng",
"olympians",
"offence",
"bulgarians",
"siemens",
"wasn",
"clinton",
"portland",
"recognise",
"adams",
"didnt",
"taylor",
"youtube",
"goverment",
"korean",
"paypal",
"isn",
"facebook",
"mhz",
"samsung",
"womans",
"german",
"america",
"mosquitos",
"melbourne",
"dj",
"behaviour",
"hasn",
"phd",
"aren",
"ethernet",
"uk",
"realise",
"brisbane",
"organisation",
"aftr",
"russian",
"nonpolar",
"pc",
"othet",
"nokia",
"boolean",
"analyse",
"centres",
"ramadan",
"latin",
"weren",
"immedietly",
"bollywood",
"conentration",
"benifit",
"oppurtunities",
"filipino",
"netflix",
"indians",
"opensource",
"atlanta",
"microsoft",
"colour",
"cse",
"jane",
"exsts",
"persob",
"centre",
"radeon",
"postgraduation",
"suez",
"illuminati",
"analytics",
"italian",
"excercises",
"favour",
"smartphones",
"shouldn",
"didnot",
"friday",
"monday",
"americans",
"hasn",
"michael",
"verizon",
"hitler",
"fermi",
"whatsapp",
"messagess",
"africa",
"weakneses",
"nikon",
"capricorn",
"romania",
"favourite",
"startups",
"spanish",
"preparegravitation",
"compulsary",
"workin",
"syria",
"immigants",
"benedict",
"legssss",
"france",
"watsup",
"arya",
"handjob",
"europe",
"shoud",
"paypal",
"upto",
"paris",
"sql",
"hitman",
"lagrangian",
"dvd",
"donald",
"enigneering",
"mightn",
"defence",
"iranian",
"increse",
"india",
"hairloss",
"volumetry",
"americans",
"quora",
"eligiblty",
"english",
"indian",
"bangalore",
"emoji",
"ielts",
"ahmedabad",
"frac",
"sociall",
"philippines",
"java",
"intraday",
"mightn",
"delhi",
"saturn",
"youtube",
"noida",
"lynda",
"demonetisation",
"html",
"dissprove",
"nlp",
"nlp",
"rollerblade",
"vlc",
"rolex",
"november",
"indians",
"nflammatories",
"absorps",
"kat",
"ibm",
"centra",
"centra",
"uk",
"pdf",
"ebook",
"sydney",
"samsung",
"usa",
"traveller",
"jaipur",
"pablo",
"ebay",
"Ebay",
"EBAY",
"whatsapp",
"imessage",
"slary",
"isis",
"blow",
"eu",
"favourite",
"reactjs",
"pakistan",
"stanford",
"harvard",
"wharton",
"saturn",
"existance",
"gb",
"poeple",
"forex",
"katrina",
"decison",
"snapchat",
"rollerblade",
"mba",
"anime",
"disney",
"schengen",
"singapore",
"ramayan",
"gmail",
"madheshi",
"germany",
"instagram",
"connecticut",
"php",
"reaso",
"japanese",
"gf",
"mumbai",
"robert",
"linkedin",
"maharashtrian",
"bollywood",
"enginnering",
"rattata",
"magikarp",
"islam",
"atleast",
"atleast",
"schengen",
"engeneering",
"casanova",
"feelngs",
"photoshop",
"canada",
"holland",
"hollywood",
"chelsea",
"modernizaton",
"instagrammer",
"thailand",
"chinese",
"corrrect",
"hillary",
"china",
"switzerland",
"imrovement",
"kms",
"undergraduation",
"qoura",
"actived",
"calender",
"bestfriend",
"dna",
"latop",
"permantley",
"connectionn",
"sylabus",
"insititute",
"sequrity",
"addmision",
"begineer",
"wtiter",
"litrate",
"programing",
"programmning",
"demonitization",
"intially",
"unseccessful",
"quikly",
]
i = 0
for q2 in unique_questions.questions.values:
i += 1
chkr.set_text(q2)
for err in chkr:
if not sum([c.isupper() for c in err.word]):
error = err.word
sugs = chkr.suggest(error)
cond = True
if len(sugs) > 2:
cond = (len(sugs[0].split()) == 1) and ('-' not in sugs[1]) and (len(sugs) != 0)
if cond and (error not in alreary_corrected):
print(q2)
print(err.word)
print(sugs, '\n\n')
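# Illustrative follow-up sketch (a hypothetical next step, not in the original
# script): instead of only printing suggestions, collect them into a correction
# map keyed by the misspelled token, which could later be applied to question1
# and question2.
#
#     corrections = {}
#     for q in unique_questions.questions.values:
#         chkr.set_text(q)
#         for err in chkr:
#             sugs = chkr.suggest(err.word)
#             if sugs and err.word not in alreary_corrected:
#                 corrections[err.word] = sugs[0]
#     # corrections can then be applied token by token to build cleaned columns.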
| gpl-3.0 |
chraibi/cellular_automata | make_fd.py | 1 | 2729 | # Generation of the fundamental diagram by calling some model defined in this repository
# Copyright (C) 2014-2015 Mohcine Chraibi
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# contact: [email protected]
import subprocess
import matplotlib.pyplot as plt
import os
from sys import argv
if len(argv) == 1:  # no input is provided
program = "asep_fast.py" # default program to use
else:
program = argv[1]
print("%s starts with %s" % (argv[0], program))
# ----------------------------------------
num_runs = 10
max_pedestrians = 120
sim_steps = 1000
pedestrians = range(1, max_pedestrians)
filename = open("stdout.txt", "w")
# ----------------------------------------
for n in pedestrians:
print("run %s with num_peds %3.3d num_runs %3.3d steps % 3.4d" % (program, n, num_runs, sim_steps))
subprocess.call(["python", program, "-n" "%d" % n, "-N", "%d" % num_runs, "-m", "%d" % sim_steps],
stdout=filename)
# ----------------------------------------
filename.close()
velocities = []
densities = []
# the line should be something like this
# N 1 mean_velocity 1.20 [m/s] density 0.10 [1/m]
filename = open("stdout.txt", "r")
for line in filename:
if line.startswith("N"):
line = line.split()
velocities.append(float(line[3]))
densities.append(float(line[6]))
filename.close()
# -------- plot FD ----------
# rho vs v
fig = plt.figure()
ax = fig.add_subplot(111)
ax.cla()
plt.subplot(211)
plt.plot(densities, velocities, lw=2)
plt.ylim([0, max(velocities)+0.05])
plt.ylabel(r"$v\, [m/s]$", size=20)
plt.xlabel(r"$\rho\, [m^{-1}]$", size=20)
# rho vs J (J=rho*v)
J = [r * v for (r, v) in zip(densities, velocities)]
plt.subplot(212)
plt.plot(densities, J, lw=2)
plt.xlabel(r"$\rho\, [m^{-1}]$", size=20)
plt.ylabel(r"$J\, [s^{-1}]$", size=20)
fig.tight_layout()
print("\n")
for end in ["pdf", "png", "eps"]:
figure_name = os.path.join("figs", "asep_fd.%s" % end)
print("result written in %s" % figure_name)
plt.savefig(figure_name)
| gpl-2.0 |
trungnt13/scikit-learn | sklearn/utils/extmath.py | 142 | 21102 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoid extra copies that `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
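# Illustrative sketch (not part of the original file): with two sparse operands
# the product stays sparse unless dense_output=True is requested.
#
#     import numpy as np
#     from scipy import sparse
#     A = sparse.csr_matrix(np.eye(3))
#     B = sparse.csr_matrix(np.arange(9.).reshape(3, 3))
#     C = safe_sparse_dot(A, B, dense_output=True)
#     print(type(C))  # numpy.ndarray rather than a sparse matrix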
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
An (A.shape[0] x size) matrix with orthonormal columns, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
# sampling the range of A by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tends to be a little faster in that
case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
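# Illustrative sketch (not part of the original docstring): a quick sanity
# check of the truncated SVD on a matrix of known low rank.
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     M = rng.randn(100, 20).dot(rng.randn(20, 80))  # rank <= 20
#     U, s, V = randomized_svd(M, n_components=20, n_iter=3, random_state=0)
#     print(np.allclose(M, (U * s).dot(V)))  # expected to be True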
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the least error
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
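# Illustrative sketch (not part of the original file): the sign flip only fixes
# a convention; it leaves the reconstructed product unchanged. Note the copies,
# since svd_flip modifies u and v in place.
#
#     import numpy as np
#     from scipy import linalg
#     rng = np.random.RandomState(0)
#     A = rng.randn(6, 4)
#     U, s, V = linalg.svd(A, full_matrices=False)
#     U2, V2 = svd_flip(U.copy(), V.copy())
#     print(np.allclose(np.dot(U2 * s, V2), A))  # True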
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
out: array-like, shape: (M, N), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = check_array(X, dtype=np.float)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
analysis and recommendations. The American Statistician, Vol. 37, No. 3, pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
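# Illustrative sketch (not part of the original file): the incremental update
# should agree with computing the statistics on the concatenated data.
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X1, X2 = rng.randn(50, 3), rng.randn(30, 3)
#     mean, var, n = _batch_mean_variance_update(
#         X2, X1.mean(axis=0), X1.var(axis=0), X1.shape[0])
#     X = np.vstack([X1, X2])
#     print(np.allclose(mean, X.mean(axis=0)), np.allclose(var, X.var(axis=0)))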
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
dudulianangang/vps | FieEneFunc1d.py | 1 | 2973 | import sdf
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
plt.style.use('seaborn-white')
# plt.rcParams['font.family'] = 'sans-serif'
# plt.rcParams['font.sans-serif'] = 'Tahoma'
# # plt.rcParams['font.monospace'] = 'Ubuntu Mono'
# plt.rcParams['font.size'] = 10
# plt.rcParams['axes.labelsize'] = 10
# plt.rcParams['axes.labelweight'] = 'bold'
# plt.rcParams['xtick.labelsize'] = 8
# plt.rcParams['ytick.labelsize'] = 8
# plt.rcParams['legend.fontsize'] = 10
# plt.rcParams['figure.titlesize'] = 12
# constants for normalization
n0 = 1.8e20
me = 9.1e-31
qe = 1.6e-19
ep = 8.9e-12
c = 3e8
wp = np.sqrt(n0*qe*qe/me/ep)
ld = c/wp
e0 = me*c*wp/qe
b0 = e0/c
tt = 1/wp
ts = 50*5
te = 1500
en0 = 0.5*ep*ld**2
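# Interpretation note (added comment, not from the original script): wp is the
# electron plasma frequency sqrt(n0*qe**2/(me*ep)), ld = c/wp the corresponding
# skin depth, and e0 = me*c*wp/qe, b0 = e0/c the normalizing field amplitudes.
# en0 = 0.5*ep*ld**2 is used below as the per-cell factor that converts the
# summed squared normalized fields (E**2 and (c*B)**2) into field energy.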
# simulation domain
nx = 3500
ny = 3500
lx = 3500
ly = 3500
# figure domain (set by grid)
grid_min_x = 0
grid_max_x = nx
grid_min_y = 0
grid_max_y = ny
Gx = np.linspace(0,lx,nx)
Gy = np.linspace(0,ly,ny)
gx = Gx[grid_min_x:grid_max_x]
gy = Gy[grid_min_y:grid_max_y]
# figure parameters
fs = 16
jetcmap = plt.cm.get_cmap("rainbow", 9)  # generate a rainbow map with 9 values
jet_vals = jetcmap(np.arange(9)) #extract those values as an array
jet_vals[0] = [1.0, 1, 1.0, 1] #change the first value
newcmap = mpl.colors.LinearSegmentedColormap.from_list("newjet", jet_vals)
# define array
sex = np.ones(7)
sey = np.ones(7)
sez = np.ones(7)
sbx = np.ones(7)
sby = np.ones(7)
sbz = np.ones(7)
tfe_cal = np.ones(7)
tfe_sys = np.ones(7)
time = np.ones(7)
# plot function
def plotfig(folder):
file = '/Volumes/yaowp2016/'
for i in range(7):
ii = i*5
fname = file+folder+'/'+str(ii).zfill(4)+'.sdf'
datafile = sdf.read(fname)
# Data = datafile.__dict__[varname].data/norm
Ex = datafile.Electric_Field_Ex.data
Ey = datafile.Electric_Field_Ey.data
Ez = datafile.Electric_Field_Ez.data
Bx = datafile.Magnetic_Field_Bx.data*c
By = datafile.Magnetic_Field_By.data*c
Bz = datafile.Magnetic_Field_Bz.data*c
# data = Data[grid_min_x:grid_max_x,grid_min_y:grid_max_y]
sex[i] = np.sum(Ex**2)*en0
sey[i] = np.sum(Ey**2)*en0
sez[i] = np.sum(Ez**2)*en0
sbx[i] = np.sum(Bx**2)*en0
sby[i] = np.sum(By**2)*en0
sbz[i] = np.sum(Bz**2)*en0
tfe_cal[i] = sex[i]+sey[i]+sez[i]+sbx[i]+sby[i]+sbz[i]
tfe_sys[i] = datafile.Total_Field_Energy_in_Simulation__J_.data
time[i] = i*ts
plt.figure(figsize=(8,5))
ax = plt.subplot()
ax.plot(time, tfe_cal,'o-', lw=2, label='tfe_cal')
ax.plot(time, tfe_sys,'s--', lw=2, label='tfe_sys')
plt.xlabel('time($\omega_{pe}^{-1}$)',fontsize=fs)
plt.ylabel('energy($J$)',fontsize=fs)
plt.legend(loc='best', numpoints=1, fancybox=True)
plt.title('total field energy',fontsize=24,fontstyle='normal')
plt.savefig(file+folder+'/plots/'+'FieldEnergy.png',bbox_inches='tight') # n means normalized
plt.close()
fn = 'njbx0.02' # folder name
# vn = 'Electric_Field_Ez' # variable name
# nm = e0 # normalized constant
plotfig(fn) | apache-2.0 |
hbhzwj/GAD | gad/Experiment/EvalForBotnetDetection.py | 1 | 9139 | #!/usr/bin/env python
""" Evaluate the performance of detector
get the statistical quantify for the hypotheis test
like False Alarm Rate.
"""
from __future__ import print_function, division, absolute_import
import copy, os
import collections
from ..Detector import MEM_FS
from ..Detector import BotDetector
from ..util import update_not_none, plt, np, DataRecorder
from ..util import zdump, zload, Load, get_detect_metric
from ..util import DataEndException
import itertools
import pandas
from .Detect import Detect
class BotnetDetectionEval(Detect):
"""plot ROC curve for the hypothesis test"""
def init_parser(self, parser):
super(BotnetDetectionEval, self).init_parser(parser)
parser.add_argument('--roc_thresholds', default=None, type=Load,
help=("any valid python expression. Thresholds used for get "
"roc curve"))
parser.add_argument('--label_col_name', default=None, type=str,
help="name of the label column")
parser.add_argument('--ip_col_names', default=None,
type=lambda x: x.split(','),
help="name of the ip columns")
@staticmethod
def parse_label(label):
return 'Botnet' in label
def get_ground_truth(self):
label_col_name = self.desc['label_col_name']
ip_col_names = self.desc['ip_col_names']
detect_rg = self.desc.get('detect_rg')
rg_type = self.desc['win_type']
assert len(ip_col_names) <= 2, "at most two IP columns are allowed."
fetch_columns = [label_col_name] + ip_col_names
data_records = self.detector.data_file.data.get_rows(fetch_columns,
rg=detect_rg,
rg_type=rg_type)
ground_truth_bot_ips = set()
all_ips = set()
for row in data_records:
if self.parse_label(row[0]): # is botflow
ground_truth_bot_ips.add(row[1])
ground_truth_bot_ips.add(row[2])
all_ips.add(row[1])
all_ips.add(row[2])
return {
'ground_truth_bot_ips': ground_truth_bot_ips,
'all_ips': all_ips,
}
@staticmethod
def get_detected_ips(label_info, detection):
ips = set()
for i, d in enumerate(detection):
if not d:
continue
ips |= set(label_info['win_ips'][i])
return ips
def eval(self):
thresholds = self.desc['roc_thresholds']
ground_truth = self.get_ground_truth()
self.logger.debug('# of ips in this time frame: %d.' %
(len(ground_truth['all_ips'])))
self.logger.debug('# of bot ips in this time frame: %d.' %
(len(ground_truth['ground_truth_bot_ips'])))
divs = self.detector.record_data['entropy']
divs = np.array(divs, dtype=float) / np.max(divs)
bot_detector_desc = copy.deepcopy(self.desc)
bot_detector_desc.update({
'threshold': 0,
'anomaly_detector': self.detector,
})
bot_detector = BotDetector.SoBotDet(bot_detector_desc)
data_recorder = DataRecorder()
res = np.zeros((len(thresholds), 2))
for i, threshold in enumerate(thresholds):
bot_detector.desc['threshold'] = threshold
self.logger.info('Start to detect with threshold %s ' % (threshold))
result = bot_detector.detect(None, anomaly_detect=False)
tp, fn, tn, fp, sensitivity, specificity = \
get_detect_metric(ground_truth['ground_truth_bot_ips'],
result['detected_bot_ips'],
ground_truth['all_ips'])
tpr = tp * 1.0 / (tp + fn) if (tp + fn) > 0 else float('nan')
fpr = fp * 1.0 / (fp + tn) if (fp + tn) > 0 else float('nan')
data_recorder.add(threshold=threshold, tp=tp, tn=tn, fp=fp, fn=fn,
tpr=tpr, fpr=fpr,
detect_result=result)
data_frame = data_recorder.to_pandas_dataframe()
data_frame.set_index(['threshold'], drop=False)
return {
'metric': data_frame,
'ground_truth_bot_ips': ground_truth['ground_truth_bot_ips'],
'all_ips': ground_truth['all_ips'],
}
def run(self):
self.desc = copy.deepcopy(self.args.config['DETECTOR_DESC'])
update_not_none(self.desc, self.args.__dict__)
self.detect()
return self.eval()
class TimeBasedBotnetDetectionEval(BotnetDetectionEval):
"""Calculate corrected metrics (tTP, tFN, tFP, tTN) for botnet detection.
Please refer to the following paper for the details:
Garcia, Sebastian, et al. 'An empirical comparison of botnet detection
methods.' Computers & Security 45 (2014): 100-123.
"""
def init_parser(self, parser):
super(TimeBasedBotnetDetectionEval, self).init_parser(parser)
parser.add_argument('--timeframe_size', default=None, type=float,
help=("--timeframe_size [float] the size of each time frame."
"Metrics (tTP, tFN, tFP, tTN) will be calculated for "
"each time frame."))
def parse_tuple(s):
return tuple(float(val) for val in s.split(','))
parser.add_argument('--timeframe_rg', default=None, type=parse_tuple,
help=("comma-separated strings, the first one is start time, "
"the second one is end time. Data in the range will be "
"divided to timeframes for evaluation."))
parser.add_argument('--timeframe_decay_ratio', default=None, type=float,
help="parameter in the exp correction function.")
parser.add_argument('--output_prefix', default=None,
help='prefix for output file')
def get_roc_curve(self, stats):
thresholds = self.desc['roc_thresholds']
if 'threshold' not in stats.columns:
return
data_recorder = DataRecorder()
for threshold in thresholds:
threshold_stats = stats[stats.threshold==threshold]
sum_stats = threshold_stats.sum()
FPR = sum_stats.tFP / (sum_stats.tFP + sum_stats.tTN)
TPR = sum_stats.tTP / (sum_stats.tTP + sum_stats.tFN)
precision = sum_stats.tTP / (sum_stats.tTP + sum_stats.tFP)
f1_score = 2 * precision * TPR / (precision + TPR)
data_recorder.add(threshold=threshold,
FPR=FPR,
TPR=TPR,
precision=precision,
f1_score=f1_score)
return data_recorder.to_pandas_dataframe()
def run(self):
timeframe_rg = self.desc['timeframe_rg']
thresholds = self.desc['roc_thresholds']
assert len(timeframe_rg) == 2, "unknown format of timeframe_rg"
timeframe_size = self.desc['timeframe_size']
timeframe_decay_ratio = self.desc['timeframe_decay_ratio']
cur_time = timeframe_rg[0]
data_recorder = DataRecorder()
timeframe_idx = 0
while cur_time < timeframe_rg[1]:
self.desc['detect_rg'] = [cur_time, cur_time + timeframe_size]
self.detect()
try:
eval_result = self.eval()
except DataEndException:
self.logger.warning('Has read end of the data in evaluation!')
break
metric = eval_result['metric']
bot_ips = eval_result['ground_truth_bot_ips']
bot_ip_num = float(len(bot_ips))
normal_ip_num = float(len(eval_result['all_ips'])) - bot_ip_num
correct_value = np.exp(-1 * timeframe_decay_ratio * timeframe_idx) + 1
tTP = metric.tp * correct_value / bot_ip_num # UPDATE HERE
tFN = metric.fn * correct_value / bot_ip_num
tFP = metric.fp * 1.0 / normal_ip_num
tTN = metric.tn * 1.0 / normal_ip_num
for idx, threshold in enumerate(thresholds):
data_recorder.add(threshold=threshold,
timeframe_idx=timeframe_idx,
tTP=tTP[idx],
tFN=tFN[idx],
tFP=tFP[idx],
tTN=tTN[idx])
cur_time += timeframe_size
timeframe_idx += 1
output_prefix = self.desc.get('output_prefix', 'output_prefix')
timeframe_results = data_recorder.to_pandas_dataframe()
timeframe_results.to_csv(output_prefix + '_time_frame.csv', sep=',')
roc = self.get_roc_curve(data_recorder.to_pandas_dataframe())
if roc is not None:
roc.to_csv(output_prefix + '_roc.csv', sep=',')
return roc
def plot(self, data_recorder):
pass
| gpl-3.0 |
lyndsysimon/osf.io | scripts/analytics/addons.py | 21 | 2200 | # -*- coding: utf-8 -*-
import os
import re
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from website.app import init_app
from .utils import plot_dates, oid_to_datetime, mkdirp
log_collection = database['nodelog']
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'addons')
mkdirp(FIG_PATH)
ADDONS = [
'box',
'dataverse',
'dropbox',
'figshare',
'github',
'googledrive',
'mendeley',
's3',
'zotero',
]
def get_collection_datetimes(collection, _id='_id', query=None):
query = query or {}
return [
oid_to_datetime(record[_id])
for record in collection.find({}, {_id: True})
]
def analyze_model(model):
dates = get_collection_datetimes(model._storage[0].store)
return {
'dates': dates,
'count': len(dates),
}
def analyze_addon_installs(name):
config = settings.ADDONS_AVAILABLE_DICT[name]
results = {
key: analyze_model(model)
for key, model in config.settings_models.iteritems()
}
return results
def analyze_addon_logs(name):
pattern = re.compile('^{0}'.format(name), re.I)
logs = log_collection.find({'action': {'$regex': pattern}}, {'date': True})
return [
record['date']
for record in logs
]
def analyze_addon(name):
installs = analyze_addon_installs(name)
for model, result in installs.iteritems():
if not result['dates']:
continue
fig = plot_dates(result['dates'])
plt.title('{} configurations: {} ({} total)'.format(name, model, len(result['dates'])))
plt.savefig(os.path.join(FIG_PATH, '{}-installs-{}.png'.format(name, model)))
plt.close()
log_dates = analyze_addon_logs(name)
if not log_dates:
return
fig = plot_dates(log_dates)
plt.title('{} actions ({} total)'.format(name, len(log_dates)))
plt.savefig(os.path.join(FIG_PATH, '{}-actions.png'.format(name)))
plt.close()
def main():
init_app(routes=False)
for addon in ADDONS:
if addon in settings.ADDONS_AVAILABLE_DICT:
analyze_addon(addon)
if __name__ == '__main__':
main()
| apache-2.0 |
mducoffe/fuel | docs/conf.py | 1 | 9405 | # -*- coding: utf-8 -*-
#
# Fuel documentation build configuration file, created by
# sphinx-quickstart2 on Wed Oct 8 17:59:44 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinxcontrib.napoleon',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.graphviz',
'sphinx.ext.intersphinx'
]
intersphinx_mapping = {
'theano': ('http://theano.readthedocs.org/en/latest/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'python': ('http://docs.python.org/3.4', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None)
}
graphviz_dot_args = ['-Gbgcolor=#fcfcfc'] # To match the RTD theme
# Render todo lists
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Fuel'
copyright = u'2014, Université de Montréal'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Fueldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Fuel.tex', u'Fuel Documentation',
u'Université de Montréal', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'fuel', u'Fuel Documentation',
[u'Université de Montréal'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Fuel', u'Fuel Documentation',
u'Université de Montréal', 'Fuel', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
import inspect
from sphinx.ext.autodoc import cut_lines
def skip_abc(app, what, name, obj, skip, options):
return skip or name.startswith('_abc')
def setup(app):
app.connect('autodoc-process-docstring', cut_lines(2, what=['module']))
app.connect('autodoc-skip-member', skip_abc)
| mit |
DonBeo/statsmodels | statsmodels/sandbox/tools/mctools.py | 33 | 17175 | '''Helper class for Monte Carlo Studies for (currently) statistical tests
Most of it should also be usable for Bootstrap, and for MC for estimators.
Takes the sample generator, dgb, and the statistical results, statistic,
as functions in the argument.
Author: Josef Perktold (josef-pktd)
License: BSD-3
TODOs, Design
-------------
If we only care about univariate analysis, i.e. marginals if the statistic returns
more than one value, then we only need to store the sorted mcres, not the
original res. Do we want to extend to multivariate analysis?
Use distribution function to keep track of MC results, ECDF, non-parametric?
Large parts are similar to a 2d array of independent multivariate random
variables. Joint distribution is not used (yet).
I guess this is currently only for one-sided test statistics, e.g. for
two-sided tests based on the t or normal distribution use the absolute value.
'''
from __future__ import print_function
from statsmodels.compat.python import lrange
import numpy as np
from statsmodels.iolib.table import SimpleTable
#copied from stattools
class StatTestMC(object):
"""class to run Monte Carlo study on a statistical test'''
TODO
print summary for quantiles and for histogram
draft in trying out script log
Parameters
----------
dgp : callable
Function that generates the data to be used in Monte Carlo that should
return a new sample with each call
statistic : callable
Function that calculates the test statistic, which can return either
a single statistic or a 1d array_like (tuple, list, ndarray).
see also statindices in description of run
Attributes
----------
many methods store intermediate results
self.mcres : ndarray (nrepl, nreturns) or (nrepl, len(statindices))
Monte Carlo results stored by run
Notes
-----
.. Warning::
This is (currently) designed for a single call to run. If run is
called a second time with different arguments, then some attributes might
not be updated, and, therefore, not correspond to the same run.
.. Warning::
Under construction, don't expect stability in the API or implementation
Examples
--------
Define a function that defines our test statistic:
def lb(x):
s,p = acorr_ljungbox(x, lags=4)
return np.r_[s, p]
Note lb returns eight values.
Define a random sample generator, for example 500 independent, normally
distributed observations in a sample:
def normalnoisesim(nobs=500, loc=0.0):
return (loc+np.random.randn(nobs))
Create an instance and run the Monte Carlo study. Using statindices=list(range(4)) means that
only the first four values of the return of the statistic (lb) are stored
in the Monte Carlo results.
mc1 = StatTestMC(normalnoisesim, lb)
mc1.run(5000, statindices=list(range(4)))
Most of the other methods take an idx which indicates for which columns
the results should be presented, e.g.
print(mc1.cdf(crit, [1,2,3])[1])
"""
def __init__(self, dgp, statistic):
self.dgp = dgp #staticmethod(dgp) #no self
self.statistic = statistic # staticmethod(statistic) #no self
def run(self, nrepl, statindices=None, dgpargs=[], statsargs=[]):
'''run the actual Monte Carlo and save results
Parameters
----------
nrepl : int
number of Monte Carlo repetitions
statindices : None or list of integers
determines which values of the return of the statistic
functions are stored in the Monte Carlo. Default None
means the entire return. If statindices is a list of
integers, then it will be used as index into the return.
dgpargs : tuple
optional parameters for the DGP
statsargs : tuple
optional parameters for the statistics function
Returns
-------
None, all results are attached
'''
self.nrepl = nrepl
self.statindices = statindices
self.dgpargs = dgpargs
self.statsargs = statsargs
dgp = self.dgp
statfun = self.statistic # name ?
#introspect len of return of statfun,
#possible problems with ndim>1, check ValueError
mcres0 = statfun(dgp(*dgpargs), *statsargs)
self.nreturn = nreturns = len(np.ravel(mcres0))
#single return statistic
if statindices is None:
#self.nreturn = nreturns = 1
mcres = np.zeros(nrepl)
mcres[0] = mcres0
for ii in range(1, nrepl-1, nreturns):
x = dgp(*dgpargs) #(1e-4+np.random.randn(nobs)).cumsum()
#should I ravel?
mcres[ii] = statfun(x, *statsargs) #unitroot_adf(x, 2,trendorder=0, autolag=None)
#more than one return statistic
else:
self.nreturn = nreturns = len(statindices)
self.mcres = mcres = np.zeros((nrepl, nreturns))
mcres[0] = [mcres0[i] for i in statindices]
for ii in range(1, nrepl-1):
x = dgp(*dgpargs) #(1e-4+np.random.randn(nobs)).cumsum()
ret = statfun(x, *statsargs)
mcres[ii] = [ret[i] for i in statindices]
self.mcres = mcres
def histogram(self, idx=None, critval=None):
'''calculate histogram values
does not do any plotting
I don't remember what I wanted here, looks similar to the new cdf
method, but this also does a binned pdf (self.histo)
'''
if self.mcres.ndim == 2:
if not idx is None:
mcres = self.mcres[:,idx]
else:
raise ValueError('currently only 1 statistic at a time')
else:
mcres = self.mcres
if critval is None:
histo = np.histogram(mcres, bins=10)
else:
bins = np.asarray(critval)
if not critval[0] == -np.inf:
    bins = np.r_[-np.inf, bins]
if not critval[-1] == np.inf:
    bins = np.r_[bins, np.inf]
histo = np.histogram(mcres,
bins=np.r_[-np.inf, critval, np.inf])
self.histo = histo
self.cumhisto = np.cumsum(histo[0])*1./self.nrepl
self.cumhistoreversed = np.cumsum(histo[0][::-1])[::-1]*1./self.nrepl
return histo, self.cumhisto, self.cumhistoreversed
#use cache decorator instead
def get_mc_sorted(self):
if not hasattr(self, 'mcressort'):
self.mcressort = np.sort(self.mcres, axis=0)
return self.mcressort
def quantiles(self, idx=None, frac=[0.01, 0.025, 0.05, 0.1, 0.975]):
'''calculate quantiles of Monte Carlo results
similar to ppf
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
frac : array_like, float
Defines which quantiles should be calculated. For example a frac
of 0.1 finds the 10% quantile, x such that cdf(x)=0.1
Returns
-------
frac : ndarray
same values as input, TODO: I should drop this again ?
quantiles : ndarray, (len(frac), len(idx))
the quantiles with frac in rows and idx variables in columns
Notes
-----
rename to ppf ? make frac required
change sequence idx, frac
'''
if self.mcres.ndim == 2:
if not idx is None:
mcres = self.mcres[:,idx]
else:
raise ValueError('currently only 1 statistic at a time')
else:
mcres = self.mcres
self.frac = frac = np.asarray(frac)
mc_sorted = self.get_mc_sorted()[:,idx]
return frac, mc_sorted[(self.nrepl*frac).astype(int)]
def cdf(self, x, idx=None):
'''calculate cumulative probabilities of Monte Carlo results
Parameters
----------
x : array_like
    values at which the empirical cdf of the Monte Carlo results is
    evaluated
idx : None or list of integers
    List of indices into the Monte Carlo results (columns) that should
    be used in the calculation
Returns
-------
x : ndarray
same as input, TODO: I should drop this again ?
probs : ndarray, (len(x), len(idx))
the cumulative probabilities with x in rows and idx variables in columns
'''
idx = np.atleast_1d(idx).tolist() #assure iterable, use list ?
# if self.mcres.ndim == 2:
# if not idx is None:
# mcres = self.mcres[:,idx]
# else:
# raise ValueError('currently only 1 statistic at a time')
# else:
# mcres = self.mcres
mc_sorted = self.get_mc_sorted()
x = np.asarray(x)
#TODO:autodetect or explicit option ?
if x.ndim > 1 and x.shape[1]==len(idx):
use_xi = True
else:
use_xi = False
x_ = x #alias
probs = []
for i,ix in enumerate(idx):
if use_xi:
x_ = x[:,i]
probs.append(np.searchsorted(mc_sorted[:,ix], x_)/float(self.nrepl))
probs = np.asarray(probs).T
return x, probs
def plot_hist(self, idx, distpdf=None, bins=50, ax=None, kwds=None):
'''plot the histogram against a reference distribution
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
distpdf : callable
probability density function of reference distribution
bins : integer or array_like
used unchanged for matplotlibs hist call
ax : TODO: not implemented yet
kwds : None or tuple of dicts
extra keyword options to the calls to the matplotlib functions,
first dictionary is for hist, second dictionary for the plot of the
reference distribution
Returns
-------
None
'''
if kwds is None:
kwds = ({},{})
if self.mcres.ndim == 2:
if not idx is None:
mcres = self.mcres[:,idx]
else:
raise ValueError('currently only 1 statistic at a time')
else:
mcres = self.mcres
lsp = np.linspace(mcres.min(), mcres.max(), 100)
import matplotlib.pyplot as plt
#I don't want to figure this out now
# if ax=None:
# fig = plt.figure()
# ax = fig.addaxis()
fig = plt.figure()
plt.hist(mcres, bins=bins, normed=True, **kwds[0])
plt.plot(lsp, distpdf(lsp), 'r', **kwds[1])
def summary_quantiles(self, idx, distppf, frac=[0.01, 0.025, 0.05, 0.1, 0.975],
varnames=None, title=None):
'''summary table for quantiles (critical values)
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
distppf : callable
quantile function (ppf) of the reference distribution
TODO: use `crit` values instead or additional, see summary_cdf
frac : array_like, float
probabilities for which the quantiles are calculated
varnames : None, or list of strings
optional list of variable names, same length as idx
Returns
-------
table : instance of SimpleTable
use `print(table)` to see results
'''
idx = np.atleast_1d(idx) #assure iterable, use list ?
quant, mcq = self.quantiles(idx, frac=frac)
#not sure whether this will work with single quantile
#crit = stats.chi2([2,4]).ppf(np.atleast_2d(quant).T)
crit = distppf(np.atleast_2d(quant).T)
mml=[]
for i, ix in enumerate(idx): #TODO: hardcoded 2 ?
mml.extend([mcq[:,i], crit[:,i]])
#mmlar = np.column_stack(mml)
mmlar = np.column_stack([quant] + mml)
#print(mmlar.shape)
if title:
title = title +' Quantiles (critical values)'
else:
title='Quantiles (critical values)'
#TODO use stub instead
if varnames is None:
varnames = ['var%d' % i for i in range(mmlar.shape[1]//2)]
headers = ['\nprob'] + ['%s\n%s' % (i, t) for i in varnames for t in ['mc', 'dist']]
return SimpleTable(mmlar,
txt_fmt={'data_fmts': ["%#6.3f"]+["%#10.4f"]*(mmlar.shape[1]-1)},
title=title,
headers=headers)
def summary_cdf(self, idx, frac, crit, varnames=None, title=None):
'''summary table for cumulative density function
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
frac : array_like, float
nominal probabilities corresponding to the critical values in crit
crit : array_like
values for which cdf is calculated
varnames : None, or list of strings
optional list of variable names, same length as idx
Returns
-------
table : instance of SimpleTable
use `print(table)` to see results
'''
idx = np.atleast_1d(idx) #assure iterable, use list ?
mml=[]
#TODO:need broadcasting in cdf
for i in range(len(idx)):
#print(i, mc1.cdf(crit[:,i], [idx[i]])[1].ravel())
mml.append(self.cdf(crit[:,i], [idx[i]])[1].ravel())
#mml = self.cdf(crit, idx)[1]
#mmlar = np.column_stack(mml)
#print(mml[0].shape, np.shape(frac))
mmlar = np.column_stack([frac] + mml)
#print(mmlar.shape)
if title:
title = title + ' Probabilities'
else:
title='Probabilities'
#TODO use stub instead
#headers = ['\nprob'] + ['var%d\n%s' % (i, t) for i in range(mmlar.shape[1]-1) for t in ['mc']]
if varnames is None:
varnames = ['var%d' % i for i in range(mmlar.shape[1]-1)]
headers = ['prob'] + varnames
return SimpleTable(mmlar,
txt_fmt={'data_fmts': ["%#6.3f"]+["%#10.4f"]*(np.array(mml).shape[1]-1)},
title=title,
headers=headers)
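# Illustrative usage sketch (added for exposition, not part of the original
# module). It mirrors the example in the class docstring: the data generating
# process and the statistic below are arbitrary choices made only to show how
# StatTestMC is wired together; real studies would use far more replications.
def _example_usage(nrepl=100):
    from scipy import stats

    def dgp(nobs=100, loc=0.0):
        return loc + np.random.randn(nobs)

    def statistic(x):
        # one-sample t test against a zero mean: returns (statistic, pvalue)
        return stats.ttest_1samp(x, 0)

    mc = StatTestMC(dgp, statistic)
    mc.run(nrepl, statindices=[0, 1])
    # 5% and 95% Monte Carlo quantiles of the test statistic (column 0)
    return mc.quantiles([0], frac=[0.05, 0.95])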
if __name__ == '__main__':
from scipy import stats
from statsmodels.iolib.table import SimpleTable
from statsmodels.sandbox.stats.diagnostic import (
acorr_ljungbox, unitroot_adf)
def randwalksim(nobs=100, drift=0.0):
return (drift+np.random.randn(nobs)).cumsum()
def normalnoisesim(nobs=500, loc=0.0):
return (loc+np.random.randn(nobs))
def adf20(x):
return unitroot_adf(x, 2,trendorder=0, autolag=None)
# print('\nResults with MC class')
# mc1 = StatTestMC(randwalksim, adf20)
# mc1.run(1000)
# print(mc1.histogram(critval=[-3.5, -3.17, -2.9 , -2.58, 0.26]))
# print(mc1.quantiles())
print('\nLjung Box')
from statsmodels.sandbox.stats.diagnostic import acorr_ljungbox
def lb4(x):
s,p = acorr_ljungbox(x, lags=4)
return s[-1], p[-1]
def lb1(x):
s,p = acorr_ljungbox(x, lags=1)
return s[0], p[0]
def lb(x):
s,p = acorr_ljungbox(x, lags=4)
return np.r_[s, p]
print('Results with MC class')
mc1 = StatTestMC(normalnoisesim, lb)
mc1.run(10000, statindices=lrange(8))
print(mc1.histogram(1, critval=[0.01, 0.025, 0.05, 0.1, 0.975]))
print(mc1.quantiles(1))
print(mc1.quantiles(0))
print(mc1.histogram(0))
#print(mc1.summary_quantiles([1], stats.chi2([2]).ppf, title='acorr_ljungbox'))
print(mc1.summary_quantiles([1,2,3], stats.chi2([2,3,4]).ppf,
varnames=['lag 1', 'lag 2', 'lag 3'],
title='acorr_ljungbox'))
print(mc1.cdf(0.1026, 1))
print(mc1.cdf(0.7278, 3))
print(mc1.cdf(0.7278, [1,2,3]))
frac = [0.01, 0.025, 0.05, 0.1, 0.975]
crit = stats.chi2([2,4]).ppf(np.atleast_2d(frac).T)
print(mc1.summary_cdf([1,3], frac, crit, title='acorr_ljungbox'))
crit = stats.chi2([2,3,4]).ppf(np.atleast_2d(frac).T)
print(mc1.summary_cdf([1,2,3], frac, crit,
varnames=['lag 1', 'lag 2', 'lag 3'],
title='acorr_ljungbox'))
print(mc1.cdf(crit, [1,2,3])[1].shape)
#fixed broadcasting in cdf Done 2d only
'''
>>> mc1.cdf(crit[:,0], [1])[1].shape
(5, 1)
>>> mc1.cdf(crit[:,0], [1,3])[1].shape
(5, 2)
>>> mc1.cdf(crit[:,:], [1,3])[1].shape
(2, 5, 2)
'''
doplot=0
if doplot:
import matplotlib.pyplot as plt
mc1.plot_hist(0,stats.chi2(2).pdf) #which pdf
plt.show()
| bsd-3-clause |
francisco-dlp/hyperspy | setup.py | 1 | 14008 | # -*- coding: utf-8 -*-
# Copyright 2007-2011 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import hyperspy.Release as Release
from distutils.errors import CompileError, DistutilsPlatformError
import distutils.ccompiler
import distutils.sysconfig
import itertools
import subprocess
import os
import warnings
from setuptools import setup, Extension, Command
import sys
v = sys.version_info
if v[0] != 3:
error = "ERROR: From version 0.8.4 HyperSpy requires Python 3. " \
"For Python 2.7 install Hyperspy 0.8.3 e.g. " \
"$ pip install --upgrade hyperspy==0.8.3"
print(error, file=sys.stderr)
sys.exit(1)
# stuff to check presence of compiler:
setup_path = os.path.dirname(__file__)
install_req = ['scipy>=0.15',
'matplotlib>=2.2.3',
'numpy>=1.10, !=1.13.0',
'traits>=4.5.0',
'natsort',
'requests',
'tqdm>=0.4.9',
'sympy',
'dill',
'h5py>=2.3',
'python-dateutil>=2.5.0',
'ipyparallel',
'dask[array]>=0.18',
'scikit-image>=0.13',
'pint>=0.8',
'statsmodels',
'numexpr',
'sparse',
'imageio',
]
extras_require = {
"learning": ['scikit-learn'],
"gui-jupyter": ["hyperspy_gui_ipywidgets>=1.1.0"],
"gui-traitsui": ["hyperspy_gui_traitsui>=1.1.0"],
"mrcz": ["blosc>=1.5", 'mrcz>=0.3.6'],
"speed": ["numba", "cython"],
# bug in pip: matplotlib is ignored here because it is already present in
# install_requires.
"tests": ["pytest>=3.6", "pytest-mpl", "matplotlib>=3.1"], # for testing
# required to build the docs
"build-doc": ["sphinx>=1.7", "sphinx_rtd_theme"],
}
# Don't include "tests" and "docs" requirements since "all" is designed to be
# used for user installation.
runtime_extras_require = {x: extras_require[x] for x in extras_require.keys()
if x not in ["tests", "build-doc"]}
extras_require["all"] = list(itertools.chain(*list(
runtime_extras_require.values())))
extras_require["dev"] = list(itertools.chain(*list(extras_require.values())))
def update_version(version):
release_path = "hyperspy/Release.py"
lines = []
with open(release_path, "r") as f:
for line in f:
if line.startswith("version = "):
line = "version = \"%s\"\n" % version
lines.append(line)
with open(release_path, "w") as f:
f.writelines(lines)
# Extensions. Add your extension here:
raw_extensions = [Extension("hyperspy.io_plugins.unbcf_fast",
[os.path.join('hyperspy', 'io_plugins', 'unbcf_fast.pyx')]),
]
cleanup_list = []
for leftover in raw_extensions:
path, ext = os.path.splitext(leftover.sources[0])
if ext in ('.pyx', '.py'):
cleanup_list.append(''.join([os.path.join(setup_path, path), '.c*']))
if os.name == 'nt':
bin_ext = '.cpython-*.pyd'
else:
bin_ext = '.cpython-*.so'
cleanup_list.append(''.join([os.path.join(setup_path, path), bin_ext]))
def count_c_extensions(extensions):
c_num = 0
for extension in extensions:
# if first source file with extension *.c or *.cpp exists
# it is cythonised or pure c/c++ extension:
sfile = extension.sources[0]
path, ext = os.path.splitext(sfile)
if os.path.exists(path + '.c') or os.path.exists(path + '.cpp'):
c_num += 1
return c_num
def cythonize_extensions(extensions):
try:
from Cython.Build import cythonize
return cythonize(extensions)
except ImportError:
warnings.warn("""WARNING: cython required to generate fast c code is not found on this system.
Only slow pure python alternative functions will be available.
To use fast implementation of some functions writen in cython either:
a) install cython and re-run the installation,
b) try alternative source distribution containing cythonized C versions of fast code,
c) use binary distribution (i.e. wheels, egg).""")
return []
def no_cythonize(extensions):
for extension in extensions:
sources = []
for sfile in extension.sources:
path, ext = os.path.splitext(sfile)
if ext in ('.pyx', '.py'):
if extension.language == 'c++':
ext = '.cpp'
else:
ext = '.c'
sfile = path + ext
sources.append(sfile)
extension.sources[:] = sources
return extensions
# to cythonize, or not to cythonize... :
if len(raw_extensions) > count_c_extensions(raw_extensions):
extensions = cythonize_extensions(raw_extensions)
else:
extensions = no_cythonize(raw_extensions)
# to compile or not to compile... depends if compiler is present:
compiler = distutils.ccompiler.new_compiler()
assert isinstance(compiler, distutils.ccompiler.CCompiler)
distutils.sysconfig.customize_compiler(compiler)
try:
compiler.compile([os.path.join(setup_path, 'hyperspy', 'misc', 'etc',
'test_compilers.c')])
except (CompileError, DistutilsPlatformError):
warnings.warn("""WARNING: C compiler can't be found.
Only slow pure python alternative functions will be available.
To use fast implementation of some functions writen in cython/c either:
a) check that you have compiler (EXACTLY SAME as your python
distribution was compiled with) installed,
b) use binary distribution of hyperspy (i.e. wheels, egg, (only osx and win)).
Installation will continue in 5 sec...""")
extensions = []
from time import sleep
sleep(5) # wait 5 secs for user to notice the message
class Recythonize(Command):
"""cythonize all extensions"""
description = "(re-)cythonize all changed cython extensions"
user_options = []
def initialize_options(self):
"""init options"""
pass
def finalize_options(self):
"""finalize options"""
pass
def run(self):
# if there is no cython it is supposed to fail:
from Cython.Build import cythonize
global raw_extensions
global extensions
cythonize(extensions)
class update_version_when_dev:
def __enter__(self):
self.release_version = Release.version
# Get the hash from the git repository if available
self.restore_version = False
if self.release_version.endswith(".dev"):
p = subprocess.Popen(["git", "describe",
"--tags", "--dirty", "--always"],
stdout=subprocess.PIPE,
shell=True)
stdout = p.communicate()[0]
if p.returncode != 0:
# Git is not available, we keep the version as is
self.restore_version = False
self.version = self.release_version
else:
gd = stdout[1:].strip().decode()
# Remove the tag
gd = gd[gd.index("-") + 1:]
self.version = self.release_version + "+git."
self.version += gd.replace("-", ".")
update_version(self.version)
self.restore_version = True
else:
self.version = self.release_version
return self.version
def __exit__(self, type, value, traceback):
if self.restore_version is True:
update_version(self.release_version)
with update_version_when_dev() as version:
setup(
name="hyperspy",
package_dir={'hyperspy': 'hyperspy'},
version=version,
ext_modules=extensions,
packages=['hyperspy',
'hyperspy.datasets',
'hyperspy._components',
'hyperspy.datasets',
'hyperspy.io_plugins',
'hyperspy.docstrings',
'hyperspy.drawing',
'hyperspy.drawing._markers',
'hyperspy.drawing._widgets',
'hyperspy.learn',
'hyperspy._signals',
'hyperspy.utils',
'hyperspy.tests',
'hyperspy.tests.axes',
'hyperspy.tests.component',
'hyperspy.tests.datasets',
'hyperspy.tests.drawing',
'hyperspy.tests.io',
'hyperspy.tests.model',
'hyperspy.tests.mva',
'hyperspy.tests.samfire',
'hyperspy.tests.signal',
'hyperspy.tests.utils',
'hyperspy.tests.misc',
'hyperspy.models',
'hyperspy.misc',
'hyperspy.misc.eels',
'hyperspy.misc.eds',
'hyperspy.misc.io',
'hyperspy.misc.holography',
'hyperspy.misc.machine_learning',
'hyperspy.external',
'hyperspy.external.mpfit',
'hyperspy.external.astroML',
'hyperspy.samfire_utils',
'hyperspy.samfire_utils.segmenters',
'hyperspy.samfire_utils.weights',
'hyperspy.samfire_utils.goodness_of_fit_tests',
],
install_requires=install_req,
tests_require=["pytest>=3.0.2"],
extras_require=extras_require,
package_data={
'hyperspy':
[
'tests/drawing/*.png',
'tests/drawing/data/*.hspy',
'tests/drawing/plot_signal/*.png',
'tests/drawing/plot_signal1d/*.png',
'tests/drawing/plot_signal2d/*.png',
'tests/drawing/plot_markers/*.png',
'tests/drawing/plot_model1d/*.png',
'tests/drawing/plot_model/*.png',
'tests/drawing/plot_roi/*.png',
'misc/eds/example_signals/*.hdf5',
'misc/holography/example_signals/*.hdf5',
'tests/drawing/plot_mva/*.png',
'tests/drawing/plot_signal/*.png',
'tests/drawing/plot_signal1d/*.png',
'tests/drawing/plot_signal2d/*.png',
'tests/drawing/plot_markers/*.png',
'tests/drawing/plot_widgets/*.png',
'tests/drawing/plot_signal_tools/*.png',
'tests/io/blockfile_data/*.blo',
'tests/io/dens_data/*.dens',
'tests/io/dm_stackbuilder_plugin/test_stackbuilder_imagestack.dm3',
'tests/io/dm3_1D_data/*.dm3',
'tests/io/dm3_2D_data/*.dm3',
'tests/io/dm3_3D_data/*.dm3',
'tests/io/dm4_1D_data/*.dm4',
'tests/io/dm4_2D_data/*.dm4',
'tests/io/dm4_3D_data/*.dm4',
'tests/io/dm3_locale/*.dm3',
'tests/io/FEI_new/*.emi',
'tests/io/FEI_new/*.ser',
'tests/io/FEI_new/*.npy',
'tests/io/FEI_old/*.emi',
'tests/io/FEI_old/*.ser',
'tests/io/FEI_old/*.npy',
'tests/io/msa_files/*.msa',
'tests/io/hdf5_files/*.hdf5',
'tests/io/hdf5_files/*.hspy',
'tests/io/tiff_files/*.tif',
'tests/io/tiff_files/*.dm3',
'tests/io/npy_files/*.npy',
'tests/io/unf_files/*.unf',
'tests/io/bruker_data/*.bcf',
'tests/io/bruker_data/*.json',
'tests/io/bruker_data/*.npy',
'tests/io/bruker_data/*.spx',
'tests/io/ripple_files/*.rpl',
'tests/io/ripple_files/*.raw',
'tests/io/emd_files/*.emd',
'tests/io/emd_files/fei_emd_files.zip',
'tests/io/protochips_data/*.npy',
'tests/io/protochips_data/*.csv',
'tests/signal/test_find_peaks1D_ohaver/test_find_peaks1D_ohaver.hdf5',
],
},
author=Release.authors['all'][0],
author_email=Release.authors['all'][1],
maintainer='Francisco de la Peña',
maintainer_email='[email protected]',
description=Release.description,
long_description=open('README.rst').read(),
license=Release.license,
platforms=Release.platforms,
url=Release.url,
keywords=Release.keywords,
cmdclass={
'recythonize': Recythonize,
},
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Natural Language :: English",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Physics",
],
)
| gpl-3.0 |
UPSduck/DataScan | ImageProcessing.py | 1 | 2935 | import numpy as np
from matplotlib import pyplot as plt
import cv2
# Capture an Image
camera_port = 0
ramp_frames = 30
camera = cv2.VideoCapture(camera_port)
def get_image():
retval, im = camera.read()
return im
for i in xrange(ramp_frames):
temp = get_image()
print("Taking image...")
camera_capture = get_image()
file = "captured_image.png"
cv2.imwrite(file, camera_capture)
del(camera)
def ImgScale(image,newSize):
r = float(newSize) / image.shape[1]
dim = (int(newSize), int(image.shape[0] * r))
image = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
return image
def ImgThresh(image,thresh=0):
if thresh != 0:
ret,newImage = cv2.threshold(image,thresh,255,cv2.THRESH_BINARY)
else:
ret,newImage = cv2.threshold(image,0,255,cv2.THRESH_OTSU)
return newImage
def FindCenter(contour):
(x,y),radius = cv2.minEnclosingCircle(contour)
center = (int(x),int(y))
return center
def FindMark(contours,mark=0):
l = len(contours)
c = mark
while l > c:
(x1,y1) = FindCenter(contours[c-1])
(x,y) = FindCenter(contours[c])
difference = (x1-x)+(y1-y)
print 'Center Difference:',(x1-x)+(y1-y),''
if bufValue*-1 < difference < bufValue:
mark = int(c)
c = l+1
print 'Mark Found at array',mark
print 'Verrifying...'
ConfirmMark(contours,mark)
else:
c = c + 1
if l == c:
print 'No Marker Found'
return mark
def ConfirmMark(contours,mark):
A1 = cv2.contourArea(contours[mark])+1
A2 = cv2.contourArea(contours[mark-1])+1
KEY = 0
if A1 < A2:
KEY = A2/A1
else:
KEY = A1/A2
if 6.5 < KEY < 8.5:
print 'KEY:',KEY
print 'Mark VERIFIED!'
else:
print 'FAILED'
print KEY , 'Search for new mark...'
FindMark(contours,mark+1)
def DrawMark(image,contours,mark,I=255,border=2):
rect = cv2.minAreaRect(contours[mark])
box = cv2.cv.BoxPoints(rect)
box = np.int0(box)
#Get Corners for box 2
rect1 = cv2.minAreaRect(contours[mark-1])
box1 = cv2.cv.BoxPoints(rect1)
box1 = np.int0(box1)
#Draw
cv2.drawContours(image,[box],0,(I,I,I),border)
cv2.drawContours(image,[box1],0,(I,I,I),border)
scale = 5
bufValue = 1.5*scale
wipSize = 100 * scale
finSize = 500
blur = 1
minEdge = 100
maxEdge = 200
# Process Image
imgOrig = cv2.imread(file,0) #imgPath for sample file for camera
imgScaled = ImgScale(imgOrig,wipSize)
imgBlured = cv2.GaussianBlur(imgScaled, (blur, blur), 0)
imgThreshed = ImgThresh(imgBlured)
imgEdged = cv2.Canny(imgThreshed,minEdge,maxEdge)
#Create and sort Contours
contours, hierarchy = cv2.findContours(imgThreshed,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cntsArea = sorted(contours, key = cv2.contourArea, reverse = True)[:50]
cntsCent = sorted(cntsArea, key = FindCenter)
#Find Mark
mark = FindMark(cntsCent)
DrawMark(imgScaled,cntsCent,mark,255,2)
#display Marker
plt.imshow(imgScaled,'gray')
plt.title('marker')
plt.xticks([]),plt.yticks([])
plt.show()
| gpl-2.0 |
mrcslws/htmresearch | htmresearch/frameworks/union_temporal_pooling/activation/excite_functions/excite_functions_all.py | 9 | 3762 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
import matplotlib.pyplot as plt
from excite_function_base import ExciteFunctionBase
class LogisticExciteFunction(ExciteFunctionBase):
"""
Implementation of a logistic activation function for activation updating.
Specifically, the function has the following form:
f(x) = (maxValue - minValue) / (1 + exp(-steepness * (x - xMidpoint) ) ) + minValue
Note: The excitation rate is linear. The activation function is
logistic.
"""
def __init__(self, xMidpoint=5, minValue=10, maxValue=20, steepness=1):
"""
@param xMidpoint: Controls where function output is half of 'maxValue,'
i.e. f(xMidpoint) = maxValue / 2
@param minValue: Minimum value of the function
@param maxValue: Controls the maximum value of the function's range
@param steepness: Controls the steepness of the "middle" part of the
curve where output values begin changing rapidly.
Must be a non-zero value.
"""
assert steepness != 0
self._xMidpoint = xMidpoint
self._maxValue = maxValue
self._minValue = minValue
self._steepness = steepness
def excite(self, currentActivation, inputs):
"""
Increases current activation by amount.
@param currentActivation (numpy array) Current activation levels for each cell
@param inputs (numpy array) inputs for each cell
"""
currentActivation += self._minValue + (self._maxValue - self._minValue) / (
1 + numpy.exp(-self._steepness * (inputs - self._xMidpoint)))
return currentActivation
def plot(self):
"""
plot the activation function
"""
plt.ion()
plt.show()
x = numpy.linspace(0, 15, 100)
y = numpy.zeros(x.shape)
y = self.excite(y, x)
plt.plot(x, y)
plt.xlabel('Input')
plt.ylabel('Persistence')
plt.title('Sigmoid Activation Function')
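# Minimal sanity-check sketch (added for illustration; not part of the original
# code). It demonstrates the midpoint property of the logistic excite function
# documented above: for an input equal to xMidpoint the added excitation is
# (minValue + maxValue) / 2, i.e. 15.0 with the default parameters.
def _demoLogisticExcite():
  fn = LogisticExciteFunction()
  activation = numpy.zeros(1)
  return fn.excite(activation, numpy.array([5.0]))  # 5.0 is the default xMidpoint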
class FixedExciteFunction(ExciteFunctionBase):
"""
Implementation of a simple fixed excite function
The function increases the activation level by a fixed amount
"""
def __init__(self, targetExcLevel=10.0):
"""
"""
self._targetExcLevel = targetExcLevel
def excite(self, currentActivation, inputs):
"""
Increases current activation by a fixed amount.
@param currentActivation (numpy array) Current activation levels for each cell
@param inputs (numpy array) inputs for each cell
"""
currentActivation += self._targetExcLevel
return currentActivation
def plot(self):
"""
plot the activation function
"""
plt.ion()
plt.show()
x = numpy.linspace(0, 15, 100)
y = numpy.zeros(x.shape)
y = self.excite(y, x)
plt.plot(x, y)
plt.xlabel('Input')
plt.ylabel('Persistence')
| agpl-3.0 |
iliavolyova/evo-clustering | src/core.py | 1 | 20837 | from __future__ import division
from matplotlib.pyplot import colormaps
import textwrap
import random
import math
import os
import numpy as np
from scipy import spatial
from sklearn.cluster import *
import time
import log
from sklearn import metrics
from sklearn import preprocessing
from dataset import *
class Config:
def __init__(self, params):
self.dataset = self.create_dataset(params['Dataset'])
self.n_dims = self.dataset.getColNum() # data dimensionality
self.k_max = params['Max clusters']
self.velicina_populacije = params['Population size']
self.trajanje_svijeta = params['Number of generations']
self.fitness_metoda = params['Fitness method']
self.dist_metoda = params['Distance measure']
self.db_param_q = params['q']
self.db_param_t = params['t']
self.weights_on = params['Feature significance'] or False
self.weights = self.dataset.params['Feature weights']
if self.dist_metoda == 'Mahalanobis':
self.inv_cov = np.linalg.inv(np.cov(self.dataset.data, rowvar=0))
def dist(self, a, b):
if self.dist_metoda == 'Minkowski_2':
return spatial.distance.minkowski(a, b, 2)
elif self.dist_metoda == 'Mahalanobis':
return spatial.distance.mahalanobis(a, b, self.inv_cov)
elif self.dist_metoda == 'Cosine':
return spatial.distance.cosine(a, b) if a[0] <= 1 and a[1] <= 1 and b[1] <= 1 and b[0] <= 1 else 100
def dist_weighted(self, a, b):
if self.dist_metoda == 'Minkowski_2':
return spatial.distance.wminkowski(a, b, p=2, w=self.weights)
elif self.dist_metoda == 'Mahalanobis':
return spatial.distance.mahalanobis(a, b, self.inv_cov)
elif self.dist_metoda == 'Cosine':
return spatial.distance.cosine(a, b) if a[0] <= 1 and a[1] <= 1 and b[1] <= 1 and b[0] <= 1 else 100
def dist_db(self, a, b):
if self.weights_on:
return self.dist_weighted(a,b)
else:
return self.dist(a, b)
def dist_cs(self, a, b):
if self.weights_on:
return self.dist_weighted(a,b)
else:
return self.dist(a, b)
def crossover_rate(self, t): # decreases over time t
return 0.5 + 0.5 * (self.trajanje_svijeta - t) / self.trajanje_svijeta
def scale_factor(self):
return 0.25 *( 0.5 + random.random() *0.5)
def create_dataset(self, dataset):
if dataset == 'Iris': return Iris()
elif dataset == 'Glass' : return Glass()
elif dataset == 'Wine' : return Wine()
elif dataset == 'Breast cancer' : return Cancer()
elif dataset == 'Naive' : return Naive()
class Core:
def __init__(self, config):
self.config = config
self.p = Populacija(self.config)
self.cycles = 0
self.staro = []
def setConfig(self, config):
self.config = config
def cycle(self):
if self.cycles < self.config.trajanje_svijeta:
self.p.evoluiraj(self.cycles)
fitnessi = np.array([kr.fitness() for kr in self.p.trenutna_generacija])
najkrom = np.argmax(fitnessi)
grupiranje = self.p.trenutna_generacija[najkrom].pridruzivanje()
colormap = self.p.trenutna_generacija[najkrom].grupiranje()
centri = self.p.trenutna_generacija[najkrom].centri_kromosoma()
print self.p.trenutna_generacija[najkrom].geni[0:self.config.k_max]
print colormap
self.staro = grupiranje
self.cycles +=1
return CycleResult(colormap, fitnessi, centri)
class CycleResult():
def __init__(self, colormap, fitnessmap, centroids):
self.colormap = colormap
self.fitnessmap = fitnessmap
self.centroids = centroids
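# Illustrative driver sketch (added for exposition, not part of the original
# module). The parameter keys mirror the confs dictionary built in the
# __main__ block below; the values are assumptions chosen only to show how
# Config, Core and CycleResult fit together, and the 'Iris' entry assumes the
# dataset definitions from dataset.py.
def _example_run():
    params = {
        'Dataset': 'Iris',
        'Number of generations': 5,
        'Population size': 10,
        'Max clusters': 10,
        'Fitness method': 'db',
        'q': 2,
        't': 2,
        'Distance measure': 'Minkowski_2',
        'Feature significance': False,
    }
    core = Core(Config(params))
    result = core.cycle()  # one generation; returns a CycleResult
    return result.colormap, result.fitnessmap, result.centroids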
class Kromosom:
geni = []
def koord_centra(self, n):
return [self.geni[self.config.k_max + self.config.n_dims * n + d] for d in range(self.config.n_dims)]
def __init__(self, config, g=[], nepravi=False):
self.config = config
if nepravi: return
if len(g):
self.geni = g
else:
# cluster activations
self.geni = [0.5 + random.random() * 0.5 for cluster in range(config.k_max)]
# cluster coordinates
self.geni.extend(
[random.random()
for dim in range(config.n_dims) # for each dimension
for k in range(config.k_max)] # for each cluster center
)
for ccluster in range(config.k_max):
if self.geni[ccluster] < 0:
self.geni[ccluster] = 0
elif self.geni[ccluster] > 1:
self.geni[ccluster] = 1
centar = self.koord_centra(ccluster)
if min(centar) >= 0 and max(centar) <= 1:
continue
centar_kvadranta = np.array([0.5 for _ in range(self.config.n_dims)])
razlika = np.subtract(np.array(centar), centar_kvadranta)
norma = np.linalg.norm(razlika) * 2
razlika /= norma
razlika += centar_kvadranta
for d in range(self.config.n_dims):
self.geni[self.config.k_max + self.config.n_dims * ccluster + d] = razlika[d]
provjereno_ispravno = False
while not provjereno_ispravno:
for ccluster in range(config.k_max):
if self.geni[ccluster] < 0:
self.geni[ccluster] = random.random() * 0.4 + 0.05
elif self.geni[ccluster] > 1:
self.geni[ccluster] = 1 - (random.random() * 0.4 + 0.05)
provjereno_ispravno = True
# everything from here to the end of the function is problematic
aktivnih = self.aktivnih_centara()
if aktivnih < 2:
for ispravak in random.sample(range(config.k_max), int(2 + random.random() * (config.k_max - 2)) ):
#for ispravak in random.sample(range(config.k_max), 2 ):
self.geni[ispravak] = 0.5 + 0.5 * random.random()
aktivnih = self.aktivnih_centara()
particija = self.pridruzivanje(ukljuci_neaktivne_centre=True)
# particija_neprazno = [p for p in particija if p != []]
ispravnih = sum([len(grupa) >= 2 and self.geni[ig] > 0.5 for ig, grupa in enumerate(particija)])
if ispravnih >= 2:
# deactivate the invalid ones, there are enough valid ones
for i, gr in enumerate(particija):
if len(gr) < 2 and self.geni[i] > 0:
self.geni[i] = random.random() * 0.4 + 0.05
provjereno_ispravno = True
else:
provjereno_ispravno = False
if not provjereno_ispravno:
tocaka = self.config.dataset.getRowNum()
po_grupi = tocaka // aktivnih
ostaci = tocaka % aktivnih
prva_iduca_tocka = 0
for grupa in range(config.k_max):
if self.geni[grupa] > 0.5:
krajnja = prva_iduca_tocka + po_grupi + (ostaci > 0)
ostaci -= 1
centar = np.average(self.config.dataset.data[prva_iduca_tocka:krajnja], axis=0)
prva_iduca_tocka = krajnja
self.geni[config.k_max + config.n_dims * grupa:
config.k_max + config.n_dims * (1 + grupa)] = centar
if len(self.geni) != config.k_max + config.k_max * config.n_dims:
print("probl")
particija = self.pridruzivanje()
particija = [x for x in particija if len(x) > 1]
K = len(particija)
if K <= 1:
print 'uh oh'
def centri_kromosoma(self, samo_aktivni = True):
return [[self.geni[self.config.k_max + self.config.n_dims * i + dim] for dim in range(self.config.n_dims)]
for i in range(self.config.k_max) if (not samo_aktivni) or self.geni[i] > 0.5]
def aktivnih_centara(self):
return sum([1 for centar in range(self.config.k_max) if self.geni[centar] > 0.5])
def pridruzivanje(self, ukljuci_neaktivne_centre = False):
if ukljuci_neaktivne_centre:
centri = self.centri_kromosoma(samo_aktivni=False)
#aktivni_centri = self.centri_kromosoma(samo_aktivni=True)
troll_tocka = [1000000 for dim in range(len(self.config.dataset.data[0]))]
centri = [c if self.geni[ic] > 0.5 else troll_tocka for ic, c in enumerate(centri)]
p = [[] for _ in centri]
for t in self.config.dataset.data:
najbl = np.argmin([self.config.dist(c, t) for c in centri])
p[najbl].append(t)
return p
else:
centri = self.centri_kromosoma()
p = [[] for _ in centri]
for t in self.config.dataset.data:
najbl = np.argmin([self.config.dist(c, t) for c in centri])
p[najbl].append(t)
return p
def grupiranje(self):
colormap = np.zeros(len(self.config.dataset.data), dtype=int)
centri = self.centri_kromosoma()
for t in self.config.dataset.data:
najbl = np.argmin([self.config.dist(c, t) for c in centri])
colormap[self.config.dataset.data.index(t)] = najbl
return colormap
def fitness_db(self, particija=[]):
if not particija:
particija = self.pridruzivanje()
particija = [x for x in particija if x != []]
K = len(particija)
if K <= 1:
print 'uh oh'
centri = [np.average(grupa, axis=0) for grupa in particija]
# within-cluster dispersions
S = [math.pow(sum([self.config.dist_db(t, centri[igrupa]) ** self.config.db_param_q for t in grupa]) / len(grupa),
1 / self.config.db_param_q)
for igrupa, grupa in enumerate(particija)]
return sum(
[max(
[(S[igrupa] + S[igrupa2]) /
spatial.distance.minkowski(centri[igrupa], centri[igrupa2], self.config.db_param_t)
for igrupa2, grupa2 in enumerate(particija) if igrupa != igrupa2])
for igrupa, grupa in enumerate(particija)]
) / K
def fitness_cs(self, particija=[]):
if not len(particija):
particija1 = self.pridruzivanje()
particija1 = [x for x in particija1 if x != []]
else:
particija1 = [x for x in particija if x != []]
duljine = [len(p) for p in particija1]
a = np.sum([np.sum([np.amax([self.config.dist_cs(particija1[i][x1], particija1[i][x2])
for x2 in range(len(particija1[i]))])
for x1 in range(len(particija1[i]))]) / duljine[i]
for i in range(len(particija1)) if duljine[i] > 1])
centri = [np.average(grupa, axis=0) for grupa in particija1]
b = np.sum([np.amin([self.config.dist_cs(centri[i], centri[j])
for j in range(len(particija1))
if j != i])
for i in range(len(particija1))])
return a / b
def fitness(self, particija=[]):
if self.config.fitness_metoda == 'cs':
return 1 / (self.fitness_cs(particija) + 0.000001)
else:
return 1 / (self.fitness_db(particija) + 0.000001)
class Populacija:
def __init__(self, config):
self.config = config
self.trenutna_generacija = []
for t in range(config.velicina_populacije):
self.trenutna_generacija.append(Kromosom(config))
def probni_vektor(self, k, t):
if random.random() < self.config.crossover_rate(t):
fiksirani = self.trenutna_generacija.pop(k)
izabrani = random.sample(self.trenutna_generacija, 3)
m, i, j = izabrani[0], izabrani[1], izabrani[2]
assert isinstance(i, Kromosom)
self.trenutna_generacija.insert(k, fiksirani) # put the temporarily removed element k back
return Kromosom(self.config, np.add(m.geni, np.multiply(self.config.scale_factor(), np.subtract(i.geni, j.geni))))
else:
return self.trenutna_generacija[k]
def evoluiraj(self, t):
iduca_generacija = []
for kr in range(self.config.velicina_populacije):
dobrost = self.trenutna_generacija[kr].fitness()
probni_vek = self.probni_vektor(kr, t)
dobrost_alt = probni_vek.fitness()
iduca_generacija.append(self.trenutna_generacija[kr] if dobrost > dobrost_alt else probni_vek)
self.trenutna_generacija = iduca_generacija
if __name__ == '__main__':
diffs = []
preskoci = 0
resfolder = os.path.join('..', 'res')
run_groups = []
run_paths = []
dbase = []
for dirname, dirnames, filenames in os.walk(resfolder):
for subdirname in dirnames:
basepath = os.path.join(resfolder, subdirname)
for dirname, dirnames, filenames in os.walk(basepath):
for f in filenames:
f = os.path.join(basepath, f)
#if "kmeans" in f or "dbscan" in f or '_Weights' in f:
if '_Weights' in f:
continue
#print f
if "kmeans" in f:
alg = "kmeans"
elif "acde" in f:
alg = "acde"
elif "dbscan" in f:
alg = "dbscan"
logger = log.Log()
logger.load(f)
msrs = logger.measures[len(logger.measures) - 1]
info = logger.head_as_array
fitm = info[3]
distm = info[4]
q = int(info[5])
t = int(info[6])
dbase.append([subdirname, alg, fitm, distm, q, t] + msrs[0:6])
dbase_wine = filter(lambda x: x[0] == 'Wine', dbase)
dbase_iris = filter(lambda x: x[0] == 'Iris', dbase)
dbase_glass = filter(lambda x: x[0] == 'Glass_1', dbase)
dbases = [dbase_glass, dbase_iris, dbase_wine]
# which metric has the best average score for scores 0 - 4?
latex_table1_prefix = textwrap.dedent("""\\begin{table}[h]
\\begin{tabular}{l|lllll}
& Rand & Zajednicka informacija & Homogenost & Potpunost & V-mjera \\\\ \\hline
""")
latex_table1_postfix = textwrap.dedent("""\\end{tabular}
\\caption{CAPT}
\\label{my-label}
\\end{table}
""")
for db in dbases:
#print db[0][0], "\n"
print latex_table1_prefix
reci = [str(br) for br in range(1, 7)]
for metric in range(5):
db_by_metric = sorted(filter(lambda x: x[1] == 'acde', db),
lambda x, y: -cmp(x[metric + 6], y[metric + 6])*100)
#print metric, db_by_metric
print 1
for run in zip(db_by_metric, range(1, 7)):
if run[0][3] == 'Cosine':
hrv = "Cos., "
elif run[0][3] == 'Minkowski_2':
hrv = "Eukl., "
elif run[0][3] == 'Mahalanobis':
hrv = "Mah., "
dbcs = (' db(' + str(run[0][4]) + ', ' + str(run[0][5]) +')') if run[0][2] == 'db' else ' cs'
reci[run[1] - 1] += " & \\begin{tabular}[c]{@{}l@{}}" + hrv + dbcs + " \\\\" + str(run[0][metric + 6]) + "\\end{tabular}"
for r in reci:
print r, "\\\\\n"
print latex_table1_postfix.replace('CAPT', db[0][0]), "\n\n"
exit()
# if we fix db/cs, which is better on average?
for db in dbases:
print 2, db[0][0]
for metr in ['db', 'cs']:
filt = filter(lambda x: x[2] == metr, filter(lambda x: x[1] == 'acde', db))
filt = [x[6:] for x in filt]
filt = [np.array(f) for f in filt] # so that we can sum them
avg = sum(filt) / len(filt)
print metr, avg
print '\n\n'
# if we fix the metric and db/cs, which is better on average?
for db in dbases:
print 3, db[0][0]
for metric in ["Minkowski_2", "Cosine", "Mahalanobis"]:
for metr in ['db', 'cs']:
filt = filter(lambda x: x[2] == metr and x[3] == metric, filter(lambda x: x[1] == 'acde', db))
filt = [x[6:] for x in filt]
filt = [np.array(f) for f in filt] # so that we can sum them
avg = sum(filt) / len(filt)
print metric, metr, avg
print '\n\n'
for db in dbases:
print 4, db[0][0]
for metric in ["Minkowski_2"]: # jer kmeans radi samo s mink
for metr in ['db', 'cs']:
latex_plot_text = textwrap.dedent("""\
\\begin{tikzpicture}
\\begin{axis}[%
scatter/classes={%
a={mark=square*,blue, mark size=6},%
b={mark=square*,red, mark size=6},%
c={mark=square*,green, mark size=6}}]
\\addplot[scatter,only marks,%
scatter src=explicit symbolic]%
table[meta=label] {
q t label
""")
for t in [int(x) for x in [1, 2, 4]]:
for q in [int(x) for x in [1, 2, 4]]:
if (q != 1 or t != 1) and metr == 'cs':
continue
# print metric, metr, q, t, '\n'
r_acde = filter(lambda x: x[1] == 'acde' and x[2] == metr and x[3] == metric and x[4] == q and x[5] == t, db)[0]
r_kmeans = filter(lambda x: x[1] == 'kmeans' and x[2] == metr and x[3] == metric and x[4] == q and x[5] == t, db)[0]
r_dbscan = filter(lambda x: x[1] == 'dbscan' and x[2] == metr and x[3] == metric and x[4] == q and x[5] == t, db)[0]
#print r_acde[-1:][0], '\n', r_kmeans[-1:][0], '\n', r_dbscan[-1:][0]
z = zip([r_acde[-6:-5][0], r_kmeans[-6:-5][0], r_dbscan[-6:-5][0]], ['a', 'b', 'c'])
#print z
rez = sorted(z, lambda x, y: cmp(y[0], x[0]))
latex_plot_text += str(q) + " " + str(t) + " " + rez[0][1] + "\n"
latex_plot_text += textwrap.dedent("""\
};
\\end{axis}
\\end{tikzpicture}""")
print "\n" + metr + "\n\n", latex_plot_text + "\n"
print '\n\n'
#print str(dbase_glass).replace('], [', '\n')
exit()
for dts in ['Iris', 'Glass', 'Wine']:
for mcl in [10]:
if dts == 'Iris':
mcl = 10
elif dts == 'Glass':
mcl = 25
elif dts == 'Wine':
mcl = 10
for dst in ["Minkowski_2"]: # , "Cosine", "Mahalanobis"], jer kmeans je za M2, pa da sve isto bude
for fs in [True, False]:
if dts == 'Wine' and fs:
continue # we have no weights for Wine
for fm in ['db', 'cs']:
for t in [1, 2, 4]:
for q in [1, 2, 4]:
if fm == 'cs' and (t != 1 or q != 1):
continue # the deep nesting has its downsides too
if preskoci > 0:
preskoci -= 1
continue
confs = {
'Dataset' : dts,
'Number of generations' : 30,
'Population size': 1,
'Max clusters' : mcl,
'Fitness method': fm,
'q' : q,
't' : t,
'Distance measure': dst,
'Feature significance': fs
}
print confs
fname_sfx = confs['Dataset'] + "_" + confs['Distance measure'] + \
("_Weights_" if confs['Feature significance'] else "_noWeights_") + \
confs['Fitness method'] + \
('_' + str(confs['q']) + '_' + str(confs['t']) if confs['Fitness method'] == 'db' else "")
diffs.sort()
print diffs
| mit |
rohanp/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 17 | 5726 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). LSHForest index
have sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundred dimensions. Higher-dimensional datasets tend to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
# Initialize LSHForest for queries of a single neighbor
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', prop=dict(size='small'))
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
prheenan/appm_project_3 | code/q1_3.py | 1 | 5290 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
# need to add the utilities class. Want 'home' to be platform independent
# import the patrick-specific utilities
import GenUtilities as pGenUtil
import PlotUtilities as pPlotUtil
import CheckpointUtilities as pCheckUtil
from scipy.stats import norm
outDir = "./out/"
g_title = 20
g_label = 18
fontsize=g_label
def getBinomials(nVals,p,nPoints):
nTrials = nVals.size
dataMatrix = np.zeros((nTrials,nPoints))
for i,n in enumerate(nVals):
dataMatrix[i,:] = np.random.binomial(n,p,nPoints)
return dataMatrix
def q12Dist(xBar,normalizer=2):
return -normalizer * np.log( 1 - xBar/normalizer)
def normHist(data,bins,**kwargs):
counts,bins = np.histogram(data,bins)
norm = counts / sum( np.diff(bins) * counts )
plt.bar(left=bins[:-1],height=norm,width=np.diff(bins),**kwargs)
return norm
def getXBar(n,xTrials,distFunc):
return distFunc(xTrials/n)
def getDeltaStats(n,p,xTrials,normalizer=2):
mu = n*p
distFunc = lambda x : q12Dist(x,normalizer)
gXBar = getXBar(n,xTrials,distFunc)
sigma= np.sqrt(p*(1-p))
gMu = distFunc(p)
gPrimeMu = (1/(1-p/normalizer) )
normalStd = abs(gPrimeMu) * sigma / np.sqrt(n)
return gXBar,gMu,normalStd
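# Quick numerical check (added for illustration, not part of the original
# script): for g(x) = -c*log(1 - x/c) with c = normalizer, the derivative used
# above, g'(p) = 1/(1 - p/c), can be verified against a finite difference.
def checkDeltaDerivative(p=1/3., normalizer=2., h=1e-6):
    analytic = 1. / (1. - p / normalizer)
    numeric = (q12Dist(p + h, normalizer) - q12Dist(p - h, normalizer)) / (2 * h)
    return analytic, numeric  # the two agree closely for these values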
def getDeltaModel(n,p,xTrials,normalizer=2,normMean=True):
gXBar,gMu, normalStd = getDeltaStats(n,p,xTrials,normalizer)
normalDist = norm(loc=0,scale=normalStd)
dist = (gXBar- (gMu))
distMean = np.mean(dist)
distVar = np.std(dist)**2
return dist,distMean,distVar,normalDist,normalStd
def getDeltaModelDistr(n,p,xTrials,coverage=10):
# distFunc; what to call to get the distribution
# n/p is the number of possible values for anything in xTrials
# taking the log ceiling of this gives an upper bound for the number
# of bins for the log of xTrials
dist,distMean,distVar,normalDist,normalStd = \
getDeltaModel(n,p,xTrials)
sortedUniDist = np.sort(np.unique(dist))
minStep = np.min(np.abs(np.diff(sortedUniDist)))
xVals = np.linspace(-max(dist),max(dist),2*coverage*max(dist)/minStep)
nBins = np.arange(-max(dist),max(dist),minStep)
return dist,distMean,distVar,normalStd,normalDist,xVals,nBins
def plotSingleHist(n,p,xTrials,outDir):
# coverage is just a plotting artifact
fig = pPlotUtil.figure()
# mu: expected value of Binomial(n,p)
# effective variance
dist,distMean,distVar,normalStd,normalDist,xVals,nBins = \
getDeltaModelDistr(n,p,xTrials)
normV = normHist(dist,nBins,\
label=("Actual Distr: Mean={:.4f},Stdev={:.4f}").\
format(distMean,np.sqrt(distVar)))
rawPDF = normalDist.pdf(xVals)
plt.plot(xVals,rawPDF,'r--',linewidth=5.0,
label="Theorertical Distr: Stdev={:.4f}".\
format(normalStd))
plt.title("Histogram for g(xBar)-g(mu) for n={:d},p={:.2f}".\
format(int(n),p),fontsize=g_title)
plt.xlabel("(g(Xbar)-g(mu)) ~ Normal(0,[g'(x)*sigma]^2/n)",
fontsize=g_label)
plt.ylabel("Proportion",fontsize=g_label)
plt.legend(frameon=False)
pPlotUtil.tickAxisFont()
catArr = list(rawPDF) + list(normV)
plt.ylim([0,max(catArr)*1.2])
plt.xlim([-max(nBins),max(nBins)])
pPlotUtil.tickAxisFont()
pPlotUtil.savefig(fig,outDir + "trial_n{:d}".format(int(n)))
#return the statistics for plotting
return distMean,distVar,normalStd**2
def plotBinomials(dataMatrix,nVals,p):
nTrials = nVals.size # rows are the trials
# save the means and variances...
means = np.zeros(nTrials)
varReal = np.zeros(nTrials)
varDist = np.zeros(nTrials)
for i,n in enumerate(nVals):
means[i],varReal[i],varDist[i] =\
plotSingleHist(n,p,dataMatrix[i,:],outDir)
# plot the means and variances
fig = pPlotUtil.figure()
plt.subplot(1,2,1)
plt.title("Mean of g(xBar)-g(mu) approaches 0",fontsize=fontsize)
expMean = 0
plt.plot(nVals,means,'ko',label="Actual Mean")
plt.axhline(expMean,color='b',linestyle='--',
label="Expected Mean: {:.2g}".format(expMean))
plt.ylim(-min(means),max(means)*1.1)
plt.xlabel("Value of n for binomial",fontsize=fontsize)
plt.ylabel("Value of g(xBar)-g(mu)",fontsize=fontsize)
plt.legend(fontsize=fontsize)
pPlotUtil.tickAxisFont()
plt.subplot(1,2,2)
plt.semilogy(nVals,varReal,'ko',label="Actual Variance")
plt.semilogy(nVals,varDist,'b--',label="Expected Variance")
plt.title("Variance of g(xBar)-g(mu)\n approaches expected",
fontsize=fontsize)
plt.xlabel("Value of n for binomial",fontsize=fontsize)
plt.ylabel("Value of g(xBar) variance",fontsize=fontsize)
pPlotUtil.tickAxisFont()
plt.legend(fontsize=fontsize)
pPlotUtil.savefig(fig,outDir + "MeanVar")
if __name__ == '__main__':
_nVals = np.array([10,20,50,75,100,150,200,350,500,1000])
pGenUtil.ensureDirExists(outDir)
_p=1/3.
_nPoints = 1e5
dataMatrix = getBinomials(_nVals,_p,_nPoints)
plotBinomials(dataMatrix,_nVals,_p)
| gpl-2.0 |
rpalovics/Alpenglow | python/test_alpenglow/experiments/test_PosSamplingFactorExperiment.py | 2 | 7977 | import alpenglow as prs
import alpenglow.Getter as rs
import alpenglow.experiments
import alpenglow.evaluation
import pandas as pd
import math
import numpy as np
class TestPosSamplingFactorExperiment:
def test_posSamplingFactorExperiment(self):
factorExperiment = alpenglow.experiments.PosSamplingFactorExperiment(
top_k=100,
seed=254938879,
dimension=10,
base_learning_rate=0.2,
positive_learning_rate=0.05,
negative_rate=10,
positive_rate=3,
pool_size=3000
)
facRankings = factorExperiment.run(
"python/test_alpenglow/test_data_4",
experimentType="online_id",
exclude_known=True,
verbose=True)
assert facRankings.top_k == 100
desired_ranks = [101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 9.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 5.0, 101.0, 101.0, 3.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 3.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 1.0, 101.0, 101.0, 101.0, 17.0, 17.0, 16.0, 101.0, 101.0, 18.0, 101.0, 1.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 40.0, 101.0, 2.0, 43.0, 43.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 13.0, 101.0, 8.0, 101.0, 3.0, 101.0, 101.0, 101.0, 1.0, 101.0, 101.0, 1.0, 101.0, 52.0, 50.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 58.0, 101.0, 3.0, 61.0, 101.0, 15.0, 47.0, 52.0, 53.0, 101.0, 101.0, 55.0, 101.0, 101.0, 101.0, 22.0, 20.0, 25.0, 16.0, 101.0, 31.0, 3.0, 101.0, 101.0, 37.0, 43.0, 101.0, 101.0, 101.0, 65.0, 25.0, 56.0, 101.0, 29.0, 38.0, 7.0, 58.0, 4.0, 43.0, 3.0, 101.0, 36.0, 101.0, 101.0, 49.0, 2.0, 38.0, 101.0, 26.0, 9.0, 43.0, 69.0, 18.0, 68.0, 101.0, 25.0, 101.0, 101.0, 22.0, 101.0, 2.0, 80.0, 101.0, 16.0, 51.0, 17.0, 33.0, 9.0, 101.0, 46.0, 101.0, 101.0, 7.0, 101.0, 101.0, 101.0, 101.0, 76.0, 101.0, 9.0, 42.0, 101.0, 101.0, 101.0, 101.0, 83.0, 11.0, 80.0, 101.0, 101.0, 19.0, 101.0, 36.0, 101.0, 13.0, 9.0, 77.0, 11.0, 101.0, 101.0, 85.0, 67.0, 101.0, 63.0, 37.0, 10.0, 10.0, 96.0, 13.0, 101.0, 101.0, 101.0, 101.0, 16.0, 101.0, 6.0, 101.0, 1.0, 101.0, 17.0, 89.0, 101.0, 101.0, 4.0, 2.0, 59.0, 23.0, 101.0, 101.0, 18.0, 77.0, 38.0, 6.0, 101.0, 70.0, 101.0, 2.0, 90.0, 76.0, 75.0, 2.0, 22.0, 38.0, 101.0, 101.0, 101.0, 101.0, 101.0, 1.0, 36.0, 101.0, 101.0, 101.0, 35.0, 14.0, 97.0, 101.0, 57.0, 12.0, 13.0, 30.0, 101.0, 101.0, 101.0, 101.0, 3.0, 18.0, 65.0, 101.0, 8.0, 58.0, 85.0, 23.0, 1.0, 13.0, 101.0, 42.0, 74.0, 101.0, 101.0, 101.0, 101.0, 38.0, 90.0, 11.0, 101.0, 101.0, 100.0, 101.0, 58.0, 97.0, 101.0, 12.0, 84.0, 10.0, 24.0, 1.0, 44.0, 101.0, 101.0, 101.0, 22.0, 101.0, 101.0, 9.0, 101.0, 83.0, 88.0, 101.0, 101.0, 101.0, 101.0, 3.0, 63.0, 101.0, 73.0, 6.0, 4.0, 22.0, 55.0, 101.0, 101.0, 101.0, 101.0, 6.0, 101.0, 3.0, 101.0, 29.0, 101.0, 10.0, 101.0, 91.0, 1.0, 101.0, 101.0, 101.0, 59.0, 3.0, 41.0, 1.0, 101.0, 90.0, 6.0, 101.0, 3.0, 101.0, 94.0, 4.0, 2.0, 10.0, 48.0, 101.0, 7.0, 1.0, 101.0, 1.0, 101.0, 6.0, 37.0, 56.0, 1.0, 76.0, 101.0, 30.0, 101.0, 1.0, 49.0, 101.0, 101.0, 5.0, 24.0, 6.0, 101.0, 101.0, 101.0, 101.0, 1.0, 20.0, 101.0, 7.0, 55.0, 101.0, 1.0, 44.0, 101.0, 26.0, 94.0, 10.0, 19.0, 67.0, 101.0, 4.0, 1.0, 101.0, 101.0, 21.0, 56.0, 58.0, 84.0, 101.0, 101.0, 101.0, 101.0, 23.0, 101.0, 80.0, 101.0, 50.0, 101.0, 81.0, 101.0, 42.0, 101.0, 101.0, 9.0, 5.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 26.0, 101.0, 101.0, 5.0, 101.0, 101.0, 36.0, 101.0, 14.0, 101.0, 60.0, 101.0, 8.0, 101.0, 20.0, 34.0, 101.0, 101.0, 101.0, 2.0, 101.0, 1.0, 25.0, 46.0, 101.0, 1.0, 5.0, 93.0, 4.0, 5.0, 1.0, 101.0, 72.0, 92.0, 8.0, 43.0, 100.0, 13.0, 41.0, 101.0, 101.0, 82.0, 101.0, 14.0, 101.0, 14.0, 17.0, 101.0, 48.0, 73.0, 44.0, 2.0, 58.0, 1.0, 2.0, 101.0, 1.0, 101.0, 26.0, 101.0, 98.0, 101.0, 97.0, 7.0, 101.0, 101.0, 101.0, 1.0, 101.0, 85.0, 36.0, 100.0, 78.0, 7.0, 101.0, 101.0, 5.0, 101.0, 101.0, 101.0, 101.0, 17.0, 101.0, 101.0, 19.0, 79.0, 14.0, 27.0, 8.0, 5.0, 52.0, 59.0, 101.0, 1.0, 46.0, 6.0, 82.0, 101.0, 1.0, 101.0, 101.0, 12.0, 92.0, 81.0, 101.0, 17.0, 8.0, 2.0, 7.0, 11.0, 101.0, 101.0, 1.0, 101.0, 70.0, 16.0, 101.0, 1.0, 101.0, 101.0, 12.0, 3.0, 101.0, 61.0, 7.0, 67.0, 101.0, 101.0, 101.0, 46.0, 101.0, 9.0, 18.0, 101.0, 4.0, 11.0, 4.0, 60.0, 
15.0, 61.0, 101.0, 8.0, 101.0, 90.0, 40.0, 1.0, 101.0, 101.0, 101.0, 101.0, 2.0, 101.0, 28.0, 101.0, 4.0, 1.0, 101.0, 3.0, 101.0, 101.0, 101.0, 33.0, 36.0, 101.0, 101.0, 101.0, 19.0, 101.0, 101.0, 22.0, 8.0, 92.0, 101.0, 101.0, 85.0, 19.0, 101.0, 7.0, 101.0, 18.0, 3.0, 52.0, 55.0, 10.0, 1.0, 32.0, 101.0, 1.0, 101.0, 7.0, 101.0, 97.0, 62.0, 11.0, 8.0, 101.0, 94.0, 101.0, 4.0, 101.0, 101.0, 64.0, 88.0, 86.0, 11.0, 101.0, 90.0, 101.0, 52.0, 1.0, 17.0, 101.0, 51.0, 82.0, 101.0, 101.0, 4.0, 8.0, 10.0, 101.0, 101.0, 8.0, 101.0, 101.0, 33.0, 101.0, 26.0, 91.0, 101.0, 101.0, 101.0, 15.0, 77.0, 101.0, 101.0, 101.0, 34.0, 22.0, 101.0, 3.0, 101.0, 24.0, 101.0, 24.0, 37.0, 48.0, 2.0, 12.0, 18.0, 4.0, 11.0, 101.0, 10.0, 7.0, 3.0, 101.0, 3.0, 101.0, 11.0, 26.0, 23.0, 8.0, 11.0, 101.0, 79.0, 55.0, 101.0, 47.0, 101.0, 101.0, 5.0, 48.0, 14.0, 101.0, 22.0, 2.0, 2.0, 14.0, 101.0, 57.0, 65.0, 101.0, 22.0, 101.0, 101.0, 1.0, 1.0, 101.0, 5.0, 101.0, 16.0, 101.0, 23.0, 24.0, 17.0, 9.0, 1.0, 19.0, 101.0, 101.0, 81.0, 44.0, 101.0, 101.0, 101.0, 4.0, 3.0, 3.0, 101.0, 24.0, 14.0, 4.0, 4.0, 27.0, 71.0, 10.0, 66.0, 20.0, 75.0, 73.0, 95.0, 55.0, 101.0, 12.0, 4.0, 101.0, 101.0, 101.0, 101.0, 17.0, 1.0, 15.0, 101.0, 4.0, 1.0, 101.0, 42.0, 7.0, 13.0, 2.0, 101.0, 101.0, 15.0, 10.0, 5.0, 1.0, 101.0, 21.0, 14.0, 101.0, 101.0, 4.0, 84.0, 3.0, 9.0, 49.0, 21.0, 101.0, 13.0, 17.0, 2.0, 18.0, 101.0, 67.0, 101.0, 92.0, 87.0, 101.0, 13.0, 7.0, 96.0, 101.0, 32.0, 5.0, 10.0, 25.0, 7.0, 101.0, 101.0, 40.0, 18.0, 9.0, 1.0, 10.0, 82.0, 11.0, 5.0, 101.0, 8.0, 8.0, 101.0, 2.0, 1.0, 34.0, 101.0, 1.0, 27.0, 100.0, 4.0, 11.0, 1.0, 101.0, 101.0, 101.0, 101.0, 101.0, 12.0, 57.0, 2.0, 101.0, 17.0, 13.0, 33.0, 10.0, 101.0, 2.0, 101.0, 74.0, 101.0, 101.0, 101.0, 101.0, 101.0, 4.0, 32.0, 22.0, 12.0, 101.0, 101.0, 71.0, 8.0, 37.0, 11.0, 6.0, 101.0, 22.0, 4.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 9.0, 27.0, 17.0, 11.0, 64.0, 101.0, 2.0, 8.0, 49.0, 101.0, 18.0, 11.0, 12.0, 5.0, 58.0, 8.0, 17.0, 10.0, 85.0, 6.0, 6.0, 101.0, 58.0, 5.0, 14.0, 2.0, 101.0, 28.0, 6.0, 10.0, 101.0, 21.0, 6.0, 8.0, 24.0, 101.0, 101.0, 101.0, 31.0, 22.0, 101.0, 101.0, 72.0, 101.0, 101.0, 24.0, 9.0, 36.0, 7.0, 27.0, 101.0, 3.0, 101.0, 101.0, 101.0, 101.0, 101.0, 22.0, 34.0, 11.0, 101.0, 101.0, 2.0, 101.0, 18.0, 101.0, 18.0, 1.0, 11.0, 26.0, 49.0, 101.0, 101.0, 44.0, 101.0, 101.0, 10.0, 101.0, 18.0, 101.0, 11.0, 12.0, 5.0, 9.0, 2.0, 26.0, 1.0, 101.0, 88.0, 5.0, 31.0, 22.0, 3.0, 101.0, 15.0, 81.0, 101.0, 12.0, 10.0, 69.0, 1.0, 38.0, 101.0, 14.0, 6.0, 56.0, 81.0, 48.0, 50.0, 16.0, 1.0, 101.0, 1.0, 15.0, 82.0, 16.0, 101.0, 4.0, 5.0, 26.0, 101.0, 18.0, 1.0, 101.0, 1.0, 2.0, 101.0, 101.0, 5.0, 31.0, 19.0, 30.0, 101.0, 101.0, 101.0]
print(list(facRankings["rank"].fillna(101)))
assert list(facRankings["rank"].fillna(101)) == desired_ranks
def test_posSamplingFactorExperiment_2b(self):
data = pd.read_csv(
"python/test_alpenglow/test_data_1",
)
factorExperiment = alpenglow.experiments.PosSamplingFactorExperiment(
top_k=100,
seed=254938879,
dimension=10,
base_learning_rate=0.2,
positive_learning_rate=0.05,
negative_rate=10,
positive_rate=3,
pool_size=3000
)
facRankings = factorExperiment.run(data, verbose=True, exclude_known=True)
assert facRankings.top_k == 100
desired_ranks = [101.0, 101.0, 101.0, 2.0, 1.0, 2.0, 101.0, 101.0, 1.0]
print(list(facRankings["rank"].fillna(101)))
assert list(facRankings["rank"].fillna(101)) == desired_ranks
| apache-2.0 |
zangsir/sms-tools | lectures/06-Harmonic-model/plots-code/sines-partials-harmonics.py | 24 | 2020 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/sine-440-490.wav')
w = np.hamming(3529)
N = 32768
hN = N/2
t = -20
pin = 4850
x1 = x[pin:pin+w.size]
mX1, pX1 = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX1, t)
pmag = mX1[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX1, pX1, ploc)
plt.figure(1, figsize=(9, 6))
plt.subplot(311)
plt.plot(fs*np.arange(mX1.size)/float(N), mX1-max(mX1), 'r', lw=1.5)
plt.plot(fs * iploc/N, ipmag-max(mX1), marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([200, 1000, -80, 4])
plt.title('mX + peaks (sine-440-490.wav)')
(fs, x) = UF.wavread('../../../sounds/vibraphone-C6.wav')
w = np.blackman(401)
N = 1024
hN = N/2
t = -80
pin = 200
x2 = x[pin:pin+w.size]
mX2, pX2 = DFT.dftAnal(x2, w, N)
ploc = UF.peakDetection(mX2, t)
pmag = mX2[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX2, pX2, ploc)
plt.subplot(3,1,2)
plt.plot(fs*np.arange(mX2.size)/float(N), mX2-max(mX2), 'r', lw=1.5)
plt.plot(fs * iploc/N, ipmag-max(mX2), marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([500,10000,-100,4])
plt.title('mX + peaks (vibraphone-C6.wav)')
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
w = np.blackman(651)
N = 2048
hN = N/2
t = -80
pin = 10000
x3 = x[pin:pin+w.size]
mX3, pX3 = DFT.dftAnal(x3, w, N)
ploc = UF.peakDetection(mX3, t)
pmag = mX3[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX3, pX3, ploc)
plt.subplot(3,1,3)
plt.plot(fs*np.arange(mX3.size)/float(N), mX3-max(mX3), 'r', lw=1.5)
plt.plot(fs * iploc/N, ipmag-max(mX3), marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([0,6000,-70,2])
plt.title('mX + peaks (oboe-A4.wav)')
plt.tight_layout()
plt.savefig('sines-partials-harmonics.png')
plt.show()
| agpl-3.0 |
GGiecold/ECLAIR | src/ECLAIR/Statistical_performance/Robustness_analysis.py | 1 | 18145 | #!/usr/bin/env python
# ECLAIR/src/ECLAIR/Statistics/Robustness_analysis.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: [email protected]; [email protected]
"""ECLAIR is a package for the robust and scalable
inference of cell lineages from gene expression data.
ECLAIR achieves a higher level of confidence in the estimated lineages
through the use of approximation algorithms for consensus clustering and by combining the information from an ensemble of minimum spanning trees
so as to come up with an improved, aggregated lineage tree.
In addition, the present package features several customized algorithms for assessing the similarity between weighted graphs or unrooted trees and for estimating the reproducibility of each edge to a given tree.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Conte, D., Foggia, P., Sansone, C. and Vento, M.,
"Thirty Years of Graph Matching in Pattern Recognition".
In: International Journal of Pattern Recognition and Artificial Intelligence,
18, 3, pp. 265-298. 2004
"""
from __future__ import print_function
from ..Build_instance import ECLAIR_core as ECLR
from .Statistical_tests import robustness_metrics
from collections import namedtuple
import numpy as np
import operator
import os
import pkg_resources
import random
from sklearn import cross_validation
from sklearn.metrics import pairwise_distances_argmin_min
from tempfile import NamedTemporaryFile
import tarfile
import time
import zipfile
__all__ = ['ECLAIR_generator', 'experiment_1', 'experiment_2', 'experiment_3']
def extract_file(path, output_directory = '.'):
if path.endswith('.zip'):
opener, mode = zipfile.ZipFile, 'r'
elif path.endswith('.tar.gz') or path.endswith('.tgz'):
opener, mode = tarfile.open, 'r:gz'
elif path.endswith('.tar.bz2') or path.endswith('.tbz'):
opener, mode = tarfile.open, 'r:bz2'
else:
raise ValueError, "\nERROR: ECLAIR: Robustness_analysis: failed to extract {0}; no appropriate extractor could be found".format(path)
cwd = os.getcwd()
os.chdir(output_directory)
try:
file = opener(path, mode)
try:
file.extractall()
finally:
file.close()
finally:
os.chdir(cwd)
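# Illustrative sketch (hypothetical path): unpack an archive next to the data files, e.g.
#
#     extract_file('./ECLAIR_performance/nbt-SD2-Transformed.tsv.tar.gz',
#                  './ECLAIR_performance')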
def ECLAIR_generator(data_file_name, sampling_fraction, N_runs, N_iter,
method, k, output_directory, data_flag = 'CyTOF'):
"""Automatically runs the ECLAIR method on a dataset
accessible via 'data_file_name' so as to generate 'N_iter'
independent consensus clusterings and associated minimum spanning
trees.
Parameters
----------
data_file_name : file object or string
A path to the dataset under consideration.
Any dataset can be submitted to this routine,
with the proviso that it has previously been
pre-processed to meet a few constraints regarding
headers, delimiters, etc.
Those constraints are handled here
for a qPCR dataset and an aggregation
of flow cytometry mouse bone marrow samples.
sampling_fraction : float
Specifies the number of points to downsample from the
dataset considered at each of 'N_iter' stages,
before applying k-means clustering
to this group obtained via a density-based approach.
k : int
The parameter used for k-means clustering each
downsampled group of data-points, as required
for all 'N_runs' intermediate steps of ECLAIR.
N_runs : int
The number of independent runs of downsampling and clustering
to perform before applying our ensemble clustering algorithm
to this group.
N_iter : int
The number of ensemble clusterings and accompanying trees
to generate by k-fold cross validation, with k = N_iter.
We randomly reshuffle the dataset and split it into
N_iter equally-sized parts. Of the N_iter subsamples,
a single subsample is kept as 'validation data', while
the others serve as the 'training data' from which we build
an ensemble clustering and afferent minimum spanning tree.
Only upon obtaining this ensemble clustering, do we
ascribe each data point from the left-over 'validation' subsample
to its nearest cluster in gene expression space.
Each sample from the whole dataset therefore has a cluster label.
Such vectors of cluster identities are then used in other
functions of this module for various comparisons between trees
and consensus clusterings.
output_directory : file object or string
The path to the folder where the information and figures
associated with each of 'N_iter' rounds of consensus clustering
are to be stored.
test_set_flag : bool, optional (default = False)
data_flag : string, optional (default = 'CyTOF')
Allows the processing of a 'CyTOF' dataset
(Supplementary dataset 2 from
Qiu et al., Nature Biotechnology, Vol. 29, 10 (2011))
Returns
-------
name_tags : list
Within 'output_directory', records the names of the folder
associated to each of 'N_iter' consensus clusterings obtained.
"""
assert method in {'hierarchical', 'k-means'}
assert data_flag in {'CyTOF', 'qPCR'}
# Our method has been thoroughly tested on the two corresponding datasets.
# Unlike the preceding procedures, 'ECLAIR_generator' is akin to a script
# due to all the peculiarities in the number of features kept
# for downstream analysis, separators, etc.
if data_flag == 'CyTOF':
skiprows = 1
delimiter = '\t'
usecols = [3, 4, 5, 7, 8, 9, 10, 12, 13]
elif data_flag == 'qPCR':
skiprows = 1
delimiter = '\t'
usecols = xrange(1, 49)
# keeping open the addition of other datasets
# to be submitted to the present routine
with open(data_file_name, 'r') as f:
data = np.loadtxt(f, dtype = float, skiprows = skiprows,
delimiter = delimiter, usecols = usecols)
# in the case of the CyTOF mouse bone marrow experiment,
# load the samples resulting from an arcSinh transformation
# applied to the raw dataset
if method == 'hierarchical':
HIERARCHICAL_parameters = namedtuple('HIERARCHICAL_parameters',
'clustering_method k')
clustering_parameters = HIERARCHICAL_parameters('hierarchical', k)
elif method == 'k-means':
KMEANS_parameters = namedtuple('KMEANS_parameters', 'clustering_method k')
clustering_parameters = KMEANS_parameters('k-means', k)
# leaving open the extension of this analysis to other clustering methods
CC_parameters = namedtuple('CC_parameters', 'N_runs sampling_fraction N_cc')
cc_parameters = CC_parameters(N_runs, sampling_fraction, k)
try:
os.makedirs(output_directory)
except OSError:
if not os.path.isdir(output_directory):
print('\nECLAIR_generator\t ERROR\n')
raise
N_samples = data.shape[0]
# separate the samples into 'N_iter' groups of equal size,
# by random selection with no replacement:
kf = cross_validation.KFold(N_samples, n_folds = N_iter, shuffle = True)
name_tags = []
c = 1
for test_indices, train_indices in kf:
training_data = np.take(data, train_indices, axis = 0)
if data_flag == 'CyTOF':
# replacing by cell IDs the column keeping
# track of measurement times:
training_data[:, 0] = np.arange(train_indices.size)
train_indices = train_indices.reshape((1, train_indices.size))
with open(output_directory + '/training_{}.txt'.format(c), 'w') as f:
np.savetxt(f, train_indices, fmt = '%d', delimiter = '\t', newline = '\n')
with open(output_directory + '/training_data_{}.tsv'.format(c), 'w') as f:
np.savetxt(f, training_data, fmt = '%.6f', delimiter = '\t')
Data_info = namedtuple('Data_info', 'data_file_name expected_N_samples skip_rows cell_IDs_column extra_excluded_columns time_info_column')
data_info = Data_info(output_directory + '/training_data_{}.tsv'.format(c),
train_indices.size, 0, 0, None, -1)
with NamedTemporaryFile('w', suffix = '.h5', delete = True, dir = './') as f:
name_tag = ECLR.ECLAIR_processing(f.name, data_info,
clustering_parameters, cc_parameters, output_directory)
name_tags.append(name_tag)
cluster_IDs_file = output_directory + '/ECLAIR_ensemble_clustering_files/' + str(name_tags[-1]) + '/consensus_labels.txt'
with open(cluster_IDs_file, 'r') as f:
cluster_IDs = np.loadtxt(f, dtype = int)
method = clustering_parameters.clustering_method
cluster_IDs = upsample(test_indices, cluster_IDs, data, method,
xrange(1, data.shape[1]))
os.remove(cluster_IDs_file)
with open(cluster_IDs_file, 'w') as f:
np.savetxt(f, cluster_IDs, fmt = '%d', delimiter = '\t')
c += 1
return name_tags
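# Illustrative sketch (hypothetical call; the path and parameter values are placeholders):
#
#     name_tags = ECLAIR_generator('nbt-SD2-Transformed.tsv', sampling_fraction=0.5,
#                                  N_runs=100, N_iter=3, method='k-means', k=50,
#                                  output_directory='./ECLAIR_test', data_flag='CyTOF')
#
# Each entry of 'name_tags' identifies one of the 'N_iter' consensus-clustering
# folders written under 'output_directory'.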
def upsample(test_indices, training_set_cluster_IDs, data,
method = 'k-means', usecols = None):
N_samples = test_indices.size + training_set_cluster_IDs.size
assert N_samples == data.shape[0]
full_set_cluster_IDs = np.zeros(N_samples, dtype = int)
training_indices = np.setdiff1d(np.arange(N_samples), test_indices, True)
full_set_cluster_IDs[training_indices] = training_set_cluster_IDs
if usecols is not None:
usecols = list(usecols)
data = np.take(data, usecols, 1)
training_data = np.delete(data, test_indices, axis = 0)
max_ID = np.amax(training_set_cluster_IDs)
centroids = np.zeros((max_ID + 1, data.shape[1]), dtype = float)
for cluster in xrange(max_ID + 1):
samples_in_cluster = np.where(training_set_cluster_IDs == cluster)[0]
if method == 'hierarchical':
centroids[cluster] = np.median(training_data[samples_in_cluster],
axis = 0)
else:
centroids[cluster] = training_data[samples_in_cluster].mean(axis = 0)
test_data = np.take(data, test_indices, axis = 0)
test_set_cluster_IDs, _ = pairwise_distances_argmin_min(test_data, centroids,
metric = 'manhattan' if method == 'hierarchical' else 'euclidean')
full_set_cluster_IDs[test_indices] = test_set_cluster_IDs
return full_set_cluster_IDs
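# Illustrative sketch (hypothetical call): 'upsample' assigns each held-out sample
# to the nearest training-set centroid (cluster medians with the Manhattan metric
# for 'hierarchical', cluster means with the Euclidean metric otherwise):
#
#     full_IDs = upsample(test_indices, training_IDs, data, method='k-means',
#                         usecols=xrange(1, data.shape[1]))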
def experiment_1(N_iter, data_flags, method = 'k-means', test_set_flag = True):
"""
Parameters
----------
N_iter : int
Number of replicate experiments to generate
"""
assert not reduce(operator.and_, data_flags)
assert reduce(operator.xor, data_flags)
assert isinstance(N_iter, int) and N_iter > 1
try:
os.makedirs('./ECLAIR_performance')
except OSError:
if not os.path.isdir('./ECLAIR_performance'):
print('\nERROR: ECLAIR: Robustness_analysis: experiment_1\n')
raise
start_t = time.time()
ECLAIR_qPCR_flag, ECLAIR_CyTOF_flag, SPADE_CyTOF_flag = data_flags
if ECLAIR_CyTOF_flag:
output_directory = './ECLAIR_performance/ECLAIR_test_sets_CyTOF'
try:
os.makedirs(output_directory)
except OSError:
if not os.path.isdir(output_directory):
print('\nERROR: ECLAIR: Robustness_analysis: experiment_1\n')
raise
# Access path to the CyTOF mouse bone marrow dataset
compressed_data_path = pkg_resources.resource_filename(__name__,
'data/SPADE_data/nbt-SD2-Transformed.tsv.tar.gz')
extract_file(compressed_data_path, './ECLAIR_performance')
data_file = './ECLAIR_performance/nbt-SD2-Transformed.tsv'
max_N_clusters = 50
name_tags = ECLAIR_generator(data_file, 0.5, 100, N_iter, method, max_N_clusters, output_directory)
_ = robustness_metrics(max_N_clusters, output_directory + '/ECLAIR_ensemble_clustering_files', name_tags,
output_directory, test_set_flag)
_ = robustness_metrics(max_N_clusters, output_directory + '/ECLAIR_ensemble_clustering_files', name_tags,
output_directory, test_set_flag, MST_flag = False)
elif ECLAIR_qPCR_flag:
data_file = pkg_resources.resource_filename(__name__,
'data/Guoji_data/qPCR.txt')
output_directory = './ECLAIR_performance/ECLAIR_test_sets_qPCR'
try:
os.makedirs(output_directory)
except OSError:
if not os.path.isdir(output_directory):
print('\nERROR: ECLAIR: Robustness_analysis: experiment_1\n')
raise
max_N_clusters = 13
name_tags = ECLAIR_generator(data_file, 0.2, 50, N_iter, method,
max_N_clusters, output_directory, 'qPCR')
_ = robustness_metrics(max_N_clusters, output_directory + '/ECLAIR_ensemble_clustering_files', name_tags,
output_directory, test_set_flag)
_ = robustness_metrics(max_N_clusters, output_directory + '/ECLAIR_ensemble_clustering_files', name_tags,
output_directory, test_set_flag, MST_flag = False)
elif SPADE_CyTOF_flag:
max_N_clusters = 50
output_directory = './ECLAIR_performance/SPADE_test_sets_CyTOF'
try:
os.makedirs(output_directory)
except OSError:
if not os.path.isdir(output_directory):
print('\nERROR: ECLAIR: Robustness_analysis: experiment_1\n')
raise
SPADE_files = pkg_resources.resource_filename(__name__,
'data/SPADE_test_sets')
for i in xrange(1, 4):
with open(SPADE_files + '/training_{0}.txt'.format(i), 'r') as f:
training_set = np.loadtxt(f, dtype = int, delimiter = '\t')
with open(output_directory + '/training_{0}.txt'.format(i), 'w') as f:
np.savetxt(f, training_set, fmt = '%d', delimiter = '\t')
name_tags = ['training_1', 'training_2', 'training_3']
_ = robustness_metrics(max_N_clusters, SPADE_files, name_tags,
output_directory, test_set_flag)
end_t = time.time()
print('\n{}_robustness\t SUMMARY\t:\nthe whole process of comparing those minimum-spanning trees and the associated consensus clusterings took {} seconds.\n'.format('SPADE' if SPADE_CyTOF_flag else 'ECLAIR', round(end_t - start_t, 2)))
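# Illustrative sketch (hypothetical call): 'data_flags' selects exactly one of
# (ECLAIR_qPCR, ECLAIR_CyTOF, SPADE_CyTOF); e.g. three replicates of the ECLAIR
# CyTOF robustness analysis:
#
#     experiment_1(N_iter=3, data_flags=(False, True, False))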
def experiment_2(data_file_name, k, sampling_fraction = 0.2, N_runs = 50):
output_directory = './ECLAIR_performance/ECLAIR_same_dataset'
try:
os.makedirs(output_directory)
except OSError:
raise
with open(data_file_name, 'r') as f:
data = np.loadtxt(f, dtype = float, skiprows = 1, delimiter = '\t')
N_samples = data.shape[0]
for i in xrange(1, 51):
with open(output_directory + '/training_{}.txt'.format(i), 'w') as f:
np.savetxt(f, np.arange(N_samples), fmt = '%d')
KMEANS_parameters = namedtuple('KMEANS_parameters', 'clustering_method k')
clustering_parameters = KMEANS_parameters('k-means', k)
CC_parameters = namedtuple('CC_parameters', 'N_runs sampling_fraction N_cc')
cc_parameters = CC_parameters(N_runs, sampling_fraction, k)
Data_info = namedtuple('Data_info', 'data_file_name expected_N_samples skip_rows cell_IDs_column extra_excluded_columns time_info_column')
data_info = Data_info(data_file_name, N_samples, 1, 0, None, -1)
name_tags = []
for i in xrange(50):
with NamedTemporaryFile('w', suffix = '.h5', delete = True, dir = './') as f:
name_tag = ECLR.ECLAIR_processing(f.name, data_info,
clustering_parameters, cc_parameters,
output_directory)
name_tags.append(name_tag)
_ = robustness_metrics(k, output_directory + '/ECLAIR_ensemble_clustering_files',
name_tags, output_directory, test_set_flag = False)
_ = robustness_metrics(k, output_directory + '/ECLAIR_ensemble_clustering_files',
name_tags, output_directory, test_set_flag = False,
MST_flag = False)
def experiment_3():
output_directory = './ECLAIR_performance/SPADE_same_dataset'
try:
os.makedirs(output_directory)
except OSError:
if not os.path.isdir(output_directory):
print('\nERROR: ECLAIR: Robustness_analysis: experiment_3\n')
raise
max_N_clusters = 50
name_tags = ['training_{0}'.format(i) for i in xrange(1, 11)]
SPADE_files = pkg_resources.resource_filename(__name__,
'data/SPADE_same_dataset')
with open(SPADE_files + '/training.txt', 'r') as f:
training_set = np.loadtxt(f, dtype = int, delimiter = '\t')
for i in xrange(1, 11):
with open(output_directory + '/training_{0}.txt'.format(i), 'w') as f:
np.savetxt(f, training_set, fmt = '%d', delimiter = '\t')
_ = robustness_metrics(max_N_clusters, SPADE_files, name_tags,
output_directory, test_set_flag = False)
| mit |
pprett/statsmodels | statsmodels/sandbox/stats/stats_mstats_short.py | 5 | 14860 | '''get versions of mstats percentile functions that also work with non-masked arrays
uses dispatch to mstats version for difficult cases:
- data is masked array
- data requires nan handling (masknan=True)
- data should be trimmed (limit is non-empty)
handle simple cases directly, which doesn't require apply_along_axis
changes compared to mstats: plotting_positions for n-dim with axis argument
addition: plotting_positions_w1d: with weights, 1d ndarray only
TODO:
consistency with scipy.stats versions not checked
docstrings from mstats not updated yet
code duplication, better solutions (?)
convert examples to tests
rename alphap, betap for consistency
timing question: one additional argsort versus apply_along_axis
weighted plotting_positions
- I haven't figured out nd version of weighted plotting_positions
- add weighted quantiles
'''
import numpy as np
from numpy import ma
from scipy import stats
#from numpy.ma import nomask
#####--------------------------------------------------------------------------
#---- --- Percentiles ---
#####--------------------------------------------------------------------------
def quantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None,
limit=(), masknan=False):
"""
Computes empirical quantiles for a data array.
Samples quantile are defined by :math:`Q(p) = (1-g).x[i] +g.x[i+1]`,
where :math:`x[j]` is the *j*th order statistic, and
`i = (floor(n*p+m))`, `m=alpha+p*(1-alpha-beta)` and `g = n*p + m - i`.
Typical values of (alpha,beta) are:
- (0,1) : *p(k) = k/n* : linear interpolation of cdf (R, type 4)
- (.5,.5) : *p(k) = (k-1/2.)/n* : piecewise linear
function (R, type 5)
- (0,0) : *p(k) = k/(n+1)* : (R type 6)
- (1,1) : *p(k) = (k-1)/(n-1)*. In this case, p(k) = mode[F(x[k])].
That's R default (R type 7)
- (1/3,1/3): *p(k) = (k-1/3)/(n+1/3)*. Then p(k) ~ median[F(x[k])].
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x. (R type 8)
- (3/8,3/8): *p(k) = (k-3/8)/(n+1/4)*. Blom.
The resulting quantile estimates are approximately unbiased
if x is normally distributed (R type 9)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM ?? JP
- (0.35, 0.65): PWM ?? JP p(k) = (k-0.35)/n
Parameters
----------
a : array-like
Input data, as a sequence or array of dimension at most 2.
prob : array-like, optional
List of quantiles to compute.
alpha : float, optional
Plotting positions parameter, default is 0.4.
beta : float, optional
Plotting positions parameter, default is 0.4.
axis : int, optional
Axis along which to perform the trimming.
If None (default), the input array is first flattened.
limit : tuple
Tuple of (lower, upper) values.
Values of `a` outside this closed interval are ignored.
Returns
-------
quants : MaskedArray
An array containing the calculated quantiles.
Examples
--------
>>> from scipy.stats.mstats import mquantiles
>>> a = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.])
>>> mquantiles(a)
array([ 19.2, 40. , 42.8])
Using a 2D array, specifying axis and limit.
>>> data = np.array([[ 6., 7., 1.],
[ 47., 15., 2.],
[ 49., 36., 3.],
[ 15., 39., 4.],
[ 42., 40., -999.],
[ 41., 41., -999.],
[ 7., -999., -999.],
[ 39., -999., -999.],
[ 43., -999., -999.],
[ 40., -999., -999.],
[ 36., -999., -999.]])
>>> mquantiles(data, axis=0, limit=(0, 50))
array([[ 19.2 , 14.6 , 1.45],
[ 40. , 37.5 , 2.5 ],
[ 42.8 , 40.05, 3.55]])
>>> data[:, 2] = -999.
>>> mquantiles(data, axis=0, limit=(0, 50))
masked_array(data =
[[19.2 14.6 --]
[40.0 37.5 --]
[42.8 40.05 --]],
mask =
[[False False True]
[False False True]
[False False True]],
fill_value = 1e+20)
"""
if isinstance(a, np.ma.MaskedArray):
return stats.mstats.mquantiles(a, prob=prob, alphap=alphap, betap=betap, axis=axis,
limit=limit)
if limit:
marr = stats.mstats.mquantiles(a, prob=prob, alphap=alphap, betap=betap, axis=axis,
limit=limit)
return ma.filled(marr, fill_value=np.nan)
if masknan:
nanmask = np.isnan(a)
if nanmask.any():
marr = ma.array(a, mask=nanmask)
marr = stats.mstats.mquantiles(marr, prob=prob, alphap=alphap, betap=betap,
axis=axis, limit=limit)
return ma.filled(marr, fill_value=np.nan)
# Initialization & checks ---------
data = np.asarray(a)
p = np.array(prob, copy=False, ndmin=1)
m = alphap + p*(1.-alphap-betap)
isrolled = False
#from _quantiles1d
if (axis is None):
data = data.ravel() #reshape(-1,1)
axis = 0
else:
axis = np.arange(data.ndim)[axis]
data = np.rollaxis(data, axis)
isrolled = True # keep track, maybe can be removed
x = np.sort(data, axis=0)
n = x.shape[0]
returnshape = list(data.shape)
returnshape[axis] = p
#TODO: check these
if n == 0:
return np.empty(len(p), dtype=float)
elif n == 1:
return np.resize(x, p.shape)
aleph = (n*p + m)
k = np.floor(aleph.clip(1, n-1)).astype(int)
ind = [None]*x.ndim
ind[0] = slice(None)
gamma = (aleph-k).clip(0,1)[ind]
q = (1.-gamma)*x[k-1] + gamma*x[k]
if isrolled:
return np.rollaxis(q, 0, axis+1)
else:
return q
def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4, axis=0, masknan=None):
"""Calculate the score at the given 'per' percentile of the
sequence a. For example, the score at per=50 is the median.
This function is a shortcut to mquantiles
"""
per = np.asarray(per, float)
if (per < 0).any() or (per > 100.).any():
raise ValueError("The percentile should be between 0. and 100. !"\
" (got %s)" % per)
return quantiles(data, prob=[per/100.], alphap=alphap, betap=betap,
limit=limit, axis=axis, masknan=masknan).squeeze()
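# Illustrative sketch: with the default parameters the score at the 50th
# percentile is the sample median, e.g.
#
#     scoreatpercentile(np.array([1., 2., 3., 4., 5.]), 50)   # -> 3.0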
def plotting_positions(data, alpha=0.4, beta=0.4, axis=0, masknan=False):
"""Returns the plotting positions (or empirical percentile points) for the
data.
Plotting positions are defined as (i-alpha)/(n+1-alpha-beta), where:
- i is the rank order statistics (starting at 1)
- n is the number of unmasked values along the given axis
- alpha and beta are two parameters.
Typical values for alpha and beta are:
- (0,1) : *p(k) = k/n* : linear interpolation of cdf (R, type 4)
- (.5,.5) : *p(k) = (k-1/2.)/n* : piecewise linear function (R, type 5)
(Bliss 1967: "Rankit")
- (0,0) : *p(k) = k/(n+1)* : Weibull (R type 6), (Van der Waerden 1952)
- (1,1) : *p(k) = (k-1)/(n-1)*. In this case, p(k) = mode[F(x[k])].
That's R default (R type 7)
- (1/3,1/3): *p(k) = (k-1/3)/(n+1/3)*. Then p(k) ~ median[F(x[k])].
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x. (R type 8), (Tukey 1962)
- (3/8,3/8): *p(k) = (k-3/8)/(n+1/4)*.
The resulting quantile estimates are approximately unbiased
if x is normally distributed (R type 9) (Blom 1958)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM
Parameters
----------
x : sequence
Input data, as a sequence or array of dimension at most 2.
prob : sequence
List of quantiles to compute.
alpha : {0.4, float} optional
Plotting positions parameter.
beta : {0.4, float} optional
Plotting positions parameter.
Notes
-----
I think the adjustments assume that there are no ties in order to be a reasonable
approximation to a continuous density function. TODO: check this
References
----------
unknown,
dates to original papers from Beasley, Erickson, Allison 2009 Behav Genet
"""
if isinstance(data, np.ma.MaskedArray):
if axis is None or data.ndim == 1:
return stats.mstats.plotting_positions(data, alpha=alpha, beta=beta)
else:
return ma.apply_along_axis(stats.mstats.plotting_positions, axis, data, alpha=alpha, beta=beta)
if masknan:
nanmask = np.isnan(data)
if nanmask.any():
marr = ma.array(data, mask=nanmask)
#code duplication:
if axis is None or data.ndim == 1:
marr = stats.mstats.plotting_positions(marr, alpha=alpha, beta=beta)
else:
marr = ma.apply_along_axis(stats.mstats.plotting_positions, axis, marr, alpha=alpha, beta=beta)
return ma.filled(marr, fill_value=np.nan)
data = np.asarray(data)
if data.size == 1: # use helper function instead
data = np.atleast_1d(data)
axis = 0
if axis is None:
data = data.ravel()
axis = 0
n = data.shape[axis]
if data.ndim == 1:
plpos = np.empty(data.shape, dtype=float)
plpos[data.argsort()] = (np.arange(1,n+1) - alpha)/(n+1.-alpha-beta)
else:
#nd assignment instead of second argsort doesn't look easy
plpos = (data.argsort(axis).argsort(axis) + 1. - alpha)/(n+1.-alpha-beta)
return plpos
meppf = plotting_positions
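# Illustrative sketch: with the default alpha = beta = 0.4 the positions are
# (rank - 0.4) / (n + 0.2), with ranks starting at 1, e.g.
#
#     plotting_positions(np.array([3., 1., 2.]))
#     # -> array([ 0.8125,  0.1875,  0.5   ])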
def plotting_positions_w1d(data, weights=None, alpha=0.4, beta=0.4,
method='notnormed'):
'''Weighted plotting positions (or empirical percentile points) for the data.
observations are weighted and the plotting positions are defined as
(ws-alpha)/(n-alpha-beta), where:
- ws is the weighted rank order statistics or cumulative weighted sum,
normalized to n if method is "normed"
- n is the number of values along the given axis if method is "normed"
and total weight otherwise
- alpha and beta are two parameters.
wtd.quantile in R package Hmisc seems to use the "notnormed" version.
notnormed coincides with unweighted segment in example, drop "normed" version ?
See Also
--------
plotting_positions : unweighted version that works also with more than one
dimension and has other options
'''
x = np.atleast_1d(data)
if x.ndim > 1:
raise ValueError('currently implemented only for 1d')
if weights is None:
weights = np.ones(x.shape)
else:
weights = np.array(weights, float, copy=False, ndmin=1) #atleast_1d(weights)
if weights.shape != x.shape:
raise ValueError('if weights is given, it needs to be the same'
'shape as data')
n = len(x)
xargsort = x.argsort()
ws = weights[xargsort].cumsum()
res = np.empty(x.shape)
if method == 'normed':
res[xargsort] = (1.*ws/ws[-1]*n-alpha)/(n+1.-alpha-beta)
else:
res[xargsort] = (1.*ws-alpha)/(ws[-1]+1.-alpha-beta)
return res
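# Illustrative sketch: each observation advances the cumulative weighted rank by
# its weight before the usual (ws - alpha)/(ws[-1] + 1 - alpha - beta) adjustment, e.g.
#
#     plotting_positions_w1d(np.array([1., 2., 3.]), weights=[1, 2, 1])
#     # -> array([ 0.14285714,  0.61904762,  0.85714286])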
def edf_normal_inverse_transformed(x, alpha=3./8, beta=3./8, axis=0):
'''rank based normal inverse transformed cdf
'''
from scipy import stats
ranks = plotting_positions(x, alpha=alpha, beta=beta, axis=axis, masknan=False)
ranks_transf = stats.norm.ppf(ranks)
return ranks_transf
if __name__ == '__main__':
x = np.arange(5)
print plotting_positions(x)
x = np.arange(10).reshape(-1,2)
print plotting_positions(x)
print quantiles(x, axis=0)
print quantiles(x, axis=None)
print quantiles(x, axis=1)
xm = ma.array(x)
x2 = x.astype(float)
x2[1,0] = np.nan
print plotting_positions(xm, axis=0)
# test 0d, 1d
for sl1 in [slice(None), 0]:
print (plotting_positions(xm[sl1,0]) == plotting_positions(x[sl1,0])).all(),
print (quantiles(xm[sl1,0]) == quantiles(x[sl1,0])).all(),
print (stats.mstats.mquantiles(ma.fix_invalid(x2[sl1,0])) == quantiles(x2[sl1,0], masknan=1)).all(),
#test 2d
for ax in [0, 1, None, -1]:
print (plotting_positions(xm, axis=ax) == plotting_positions(x, axis=ax)).all(),
print (quantiles(xm, axis=ax) == quantiles(x, axis=ax)).all(),
print (stats.mstats.mquantiles(ma.fix_invalid(x2), axis=ax) == quantiles(x2, axis=ax, masknan=1)).all(),
#stats version doesn't have axis
print (stats.mstats.plotting_positions(ma.fix_invalid(x2)) == plotting_positions(x2, axis=None, masknan=1)).all(),
#test 3d
x3 = np.dstack((x,x)).T
for ax in [1,2]:
print (plotting_positions(x3, axis=ax)[0] == plotting_positions(x.T, axis=ax-1)).all(),
np.testing.assert_equal(plotting_positions(np.arange(10), alpha=0.35, beta=1-0.35), (1+np.arange(10)-0.35)/10)
np.testing.assert_equal(plotting_positions(np.arange(10), alpha=0.4, beta=0.4), (1+np.arange(10)-0.4)/(10+0.2))
np.testing.assert_equal(plotting_positions(np.arange(10)), (1+np.arange(10)-0.4)/(10+0.2))
print
print scoreatpercentile(x, [10,90])
print plotting_positions_w1d(x[:,0])
print (plotting_positions_w1d(x[:,0]) == plotting_positions(x[:,0])).all()
#weights versus replicating multiple occurrences of same x value
w1 = [1, 1, 2, 1, 1]
plotexample = 1
if plotexample:
import matplotlib.pyplot as plt
plt.figure()
plt.title('ppf, cdf values on horizontal axis')
plt.step(plotting_positions_w1d(x[:,0], weights=w1, method='0'), x[:,0], where='post')
plt.step(stats.mstats.plotting_positions(np.repeat(x[:,0],w1,axis=0)),np.repeat(x[:,0],w1,axis=0),where='post')
plt.plot(plotting_positions_w1d(x[:,0], weights=w1, method='0'), x[:,0], '-o')
plt.plot(stats.mstats.plotting_positions(np.repeat(x[:,0],w1,axis=0)),np.repeat(x[:,0],w1,axis=0), '-o')
plt.figure()
plt.title('cdf, cdf values on vertical axis')
plt.step(x[:,0], plotting_positions_w1d(x[:,0], weights=w1, method='0'),where='post')
plt.step(np.repeat(x[:,0],w1,axis=0), stats.mstats.plotting_positions(np.repeat(x[:,0],w1,axis=0)),where='post')
plt.plot(x[:,0], plotting_positions_w1d(x[:,0], weights=w1, method='0'), '-o')
plt.plot(np.repeat(x[:,0],w1,axis=0), stats.mstats.plotting_positions(np.repeat(x[:,0],w1,axis=0)), '-o')
plt.show()
| bsd-3-clause |
jf87/smap | python/smap/contrib/dtutil.py | 6 | 4241 | """
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
'''
Datetime utilities. Convert between strings, timestamps and aware datetime objects.
Also, includes matplotlib helpers to setup date plotting.
@author Andrew Krioukov
'''
from dateutil.tz import *
import datetime, calendar
try:
import pytz
except ImportError:
pass
utc = gettz('UTC')
local = tzlocal()
def now(tzstr = 'UTC'):
'''Returns an aware datetime object with the current time in tzstr timezone'''
if tzstr == 'Local':
tz = local
else:
tz = gettz(tzstr)
return datetime.datetime.now(tz)
def strptime_tz(str, format='%x %X', tzstr='Local'):
'''Returns an aware datetime object. tzstr is a timezone string such as
'US/Pacific' or 'Local' by default which uses the local timezone.
'''
dt = datetime.datetime.strptime(str, format)
if tzstr == 'Local':
tz = local
else:
tz = gettz(tzstr)
return dt.replace(tzinfo = tz)
def strftime_tz(dt=None, format='%x %X', tzstr=None):
'''Returns a string from an aware datetime object. tzstr specifies the
timezone of the result. A value of None uses the datetime object's timezone
and a value of 'Local' uses the local system timezone.'''
if dt == None:
dt = now('Local')
if not dt.tzinfo:
raise ValueError('dt must be an aware datetime')
if tzstr:
if tzstr == 'Local':
tz = local
else:
tz = gettz(tzstr)
dt = dt.astimezone(tz)
return dt.strftime(format)
def dt2ts(dt):
'''Convert an aware datetime object to a UTC timestamp.'''
if not dt.tzinfo:
raise ValueError('dt must be an aware datetime')
return calendar.timegm(dt.utctimetuple())
def ts2dt(ts, tzinfo=utc):
'''Convert a UTC timestamp to an aware datetime object with UTC timezone'''
return datetime.datetime.utcfromtimestamp(ts).replace(tzinfo=tzinfo)
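# Illustrative sketch (hypothetical values): aware datetimes and UTC timestamps
# round-trip through dt2ts/ts2dt, e.g.
#
#     dt = strptime_tz('01/02/2013 15:00:00', '%m/%d/%Y %H:%M:%S', 'UTC')
#     ts = dt2ts(dt)        # seconds since the Unix epoch
#     ts2dt(ts) == dt       # True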
def ts2pylabts(ts, tzstr='UTC'):
'''Convert a UTC timestamp to float days since 0001-01-01 UTC.'''
tz = gettz(tzstr)
dt = datetime.datetime.utcfromtimestamp(ts).replace(tzinfo=tz)
dt_0 = datetime.datetime(year=1, month=1, day=1, tzinfo=gettz('UTC'))
# timedelta converts everything to days and seconds
delta = dt - dt_0
return delta.days + (delta.seconds / (3600. * 24))
def ts(str, format='%x %X', tzstr='Local'):
return dt2ts(strptime_tz(str, format, tzstr))
def iso8601(ts, tzinfo=utc):
return str(ts.astimezone(tzinfo)).replace(' ', 'T')
def excel(ts, tzinfo=utc):
# strip off the timezone offset and also
return ts.astimezone(tzinfo).strftime("%m/%d/%Y %H:%M:%S")
def olson(cname, offset):
"""Convert a libc name (like EST) to an olson tz name
(America/New_York)"""
zones = []
for name in pytz.common_timezones:
timezone = pytz.timezone(name)
if not hasattr(timezone, '_tzinfos'):
continue
for (utcoffset, daylight, tzname), _ in timezone._tzinfos.iteritems():
if tzname == cname and utcoffset == offset:
zones.append(name)
return zones
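# Illustrative sketch (hypothetical values; requires pytz): map a libc
# abbreviation plus UTC offset to candidate Olson names, e.g.
#
#     olson('EST', datetime.timedelta(hours=-5))   # ['America/New_York', ...]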
| bsd-2-clause |
hainm/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
gfyoung/pandas | pandas/tests/window/moments/test_moments_rolling.py | 2 | 15849 | import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, Series, date_range
import pandas._testing as tm
def test_centered_axis_validation():
# ok
Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
# bad axis
msg = "No axis named 1 for object type Series"
with pytest.raises(ValueError, match=msg):
Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
# ok ok
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=0).mean()
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=1).mean()
# bad axis
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
(DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=2).mean())
@td.skip_if_no_scipy
def test_cmov_mean():
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
result = Series(vals).rolling(5, center=True).mean()
expected_values = [
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
expected = Series(expected_values)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window():
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
result = Series(vals).rolling(5, win_type="boxcar", center=True).mean()
expected_values = [
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
expected = Series(expected_values)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window_corner():
# GH 8238
# all nan
vals = Series([np.nan] * 10)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert np.isnan(result).all()
# empty
vals = Series([], dtype=object)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert len(result) == 0
# shorter than window
vals = Series(np.random.randn(5))
result = vals.rolling(10, win_type="boxcar").mean()
assert np.isnan(result).all()
assert len(result) == 5
@td.skip_if_no_scipy
@pytest.mark.parametrize(
"f,xp",
[
(
"mean",
[
[np.nan, np.nan],
[np.nan, np.nan],
[9.252, 9.392],
[8.644, 9.906],
[8.87, 10.208],
[6.81, 8.588],
[7.792, 8.644],
[9.05, 7.824],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"std",
[
[np.nan, np.nan],
[np.nan, np.nan],
[3.789706, 4.068313],
[3.429232, 3.237411],
[3.589269, 3.220810],
[3.405195, 2.380655],
[3.281839, 2.369869],
[3.676846, 1.801799],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"var",
[
[np.nan, np.nan],
[np.nan, np.nan],
[14.36187, 16.55117],
[11.75963, 10.48083],
[12.88285, 10.37362],
[11.59535, 5.66752],
[10.77047, 5.61628],
[13.51920, 3.24648],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"sum",
[
[np.nan, np.nan],
[np.nan, np.nan],
[46.26, 46.96],
[43.22, 49.53],
[44.35, 51.04],
[34.05, 42.94],
[38.96, 43.22],
[45.25, 39.12],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
],
)
def test_cmov_window_frame(f, xp):
# Gh 8238
df = DataFrame(
np.array(
[
[12.18, 3.64],
[10.18, 9.16],
[13.24, 14.61],
[4.51, 8.11],
[6.15, 11.44],
[9.14, 6.21],
[11.31, 10.67],
[2.94, 6.51],
[9.42, 8.39],
[12.44, 7.34],
]
)
)
xp = DataFrame(np.array(xp))
roll = df.rolling(5, win_type="boxcar", center=True)
rs = getattr(roll, f)()
tm.assert_frame_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_na_min_periods():
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = vals.rolling(5, min_periods=4, center=True).mean()
rs = vals.rolling(5, win_type="boxcar", min_periods=4, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular(win_types):
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
xps = {
"hamming": [
np.nan,
np.nan,
8.71384,
9.56348,
12.38009,
14.03687,
13.8567,
11.81473,
np.nan,
np.nan,
],
"triang": [
np.nan,
np.nan,
9.28667,
10.34667,
12.00556,
13.33889,
13.38,
12.33667,
np.nan,
np.nan,
],
"barthann": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
"bohman": [
np.nan,
np.nan,
7.61599,
9.1764,
12.83559,
14.17267,
14.65923,
11.10401,
np.nan,
np.nan,
],
"blackmanharris": [
np.nan,
np.nan,
6.97691,
9.16438,
13.05052,
14.02156,
15.10512,
10.74574,
np.nan,
np.nan,
],
"nuttall": [
np.nan,
np.nan,
7.04618,
9.16786,
13.02671,
14.03559,
15.05657,
10.78514,
np.nan,
np.nan,
],
"blackman": [
np.nan,
np.nan,
7.73345,
9.17869,
12.79607,
14.20036,
14.57726,
11.16988,
np.nan,
np.nan,
],
"bartlett": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_linear_range(win_types):
# GH 8238
vals = np.array(range(10), dtype=float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_missing_data(win_types):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48]
)
xps = {
"bartlett": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"blackman": [
np.nan,
np.nan,
9.04582,
11.41536,
7.73345,
9.17869,
12.79607,
14.20036,
15.8706,
13.655,
],
"barthann": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"bohman": [
np.nan,
np.nan,
8.9444,
11.56327,
7.61599,
9.1764,
12.83559,
14.17267,
15.90976,
13.655,
],
"hamming": [
np.nan,
np.nan,
9.59321,
10.29694,
8.71384,
9.56348,
12.38009,
14.20565,
15.24694,
13.69758,
],
"nuttall": [
np.nan,
np.nan,
8.47693,
12.2821,
7.04618,
9.16786,
13.02671,
14.03673,
16.08759,
13.65553,
],
"triang": [
np.nan,
np.nan,
9.33167,
9.76125,
9.28667,
10.34667,
12.00556,
13.82125,
14.49429,
13.765,
],
"blackmanharris": [
np.nan,
np.nan,
8.42526,
12.36824,
6.97691,
9.16438,
13.05052,
14.02175,
16.1098,
13.65509,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, min_periods=3).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special(win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"p": 2.0, "sig": 2.0},
"exponential": {"tau": 10},
}
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
xps = {
"gaussian": [
np.nan,
np.nan,
8.97297,
9.76077,
12.24763,
13.89053,
13.65671,
12.01002,
np.nan,
np.nan,
],
"general_gaussian": [
np.nan,
np.nan,
9.85011,
10.71589,
11.73161,
13.08516,
12.95111,
12.74577,
np.nan,
np.nan,
],
"kaiser": [
np.nan,
np.nan,
9.86851,
11.02969,
11.65161,
12.75129,
12.90702,
12.83757,
np.nan,
np.nan,
],
"exponential": [
np.nan,
np.nan,
9.83364,
11.10472,
11.64551,
12.66138,
12.92379,
12.83770,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types_special])
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special_linear_range(win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"p": 2.0, "sig": 2.0},
"slepian": {"width": 0.5},
"exponential": {"tau": 10},
}
vals = np.array(range(10), dtype=float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
def test_rolling_min_min_periods():
a = Series([1, 2, 3, 4, 5])
result = a.rolling(window=100, min_periods=1).min()
expected = Series(np.ones(len(a)))
tm.assert_series_equal(result, expected)
msg = "min_periods 5 must be <= window 3"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3]).rolling(window=3, min_periods=5).min()
def test_rolling_max_min_periods():
a = Series([1, 2, 3, 4, 5], dtype=np.float64)
b = a.rolling(window=100, min_periods=1).max()
tm.assert_almost_equal(a, b)
msg = "min_periods 5 must be <= window 3"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3]).rolling(window=3, min_periods=5).max()
def test_rolling_quantile_np_percentile():
# #9413: Tests that rolling window's quantile default behavior
# is analogous to Numpy's percentile
row = 10
col = 5
idx = date_range("20100101", periods=row, freq="B")
df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx)
df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)
np_percentile = np.percentile(df, [25, 50, 75], axis=0)
tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))
@pytest.mark.parametrize("quantile", [0.0, 0.1, 0.45, 0.5, 1])
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
)
@pytest.mark.parametrize(
"data",
[
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[8.0, 1.0, 3.0, 4.0, 5.0, 2.0, 6.0, 7.0],
[0.0, np.nan, 0.2, np.nan, 0.4],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 0.1, np.nan, 0.3, 0.4, 0.5],
[0.5],
[np.nan, 0.7, 0.6],
],
)
def test_rolling_quantile_interpolation_options(quantile, interpolation, data):
# Tests that rolling window's quantile behavior is analogous to
# Series' quantile for each interpolation option
s = Series(data)
q1 = s.quantile(quantile, interpolation)
q2 = s.expanding(min_periods=1).quantile(quantile, interpolation).iloc[-1]
if np.isnan(q1):
assert np.isnan(q2)
else:
assert q1 == q2
def test_invalid_quantile_value():
data = np.arange(5)
s = Series(data)
msg = "Interpolation 'invalid' is not supported"
with pytest.raises(ValueError, match=msg):
s.rolling(len(data), min_periods=1).quantile(0.5, interpolation="invalid")
def test_rolling_quantile_param():
ser = Series([0.0, 0.1, 0.5, 0.9, 1.0])
msg = "quantile value -0.1 not in \\[0, 1\\]"
with pytest.raises(ValueError, match=msg):
ser.rolling(3).quantile(-0.1)
msg = "quantile value 10.0 not in \\[0, 1\\]"
with pytest.raises(ValueError, match=msg):
ser.rolling(3).quantile(10.0)
msg = "must be real number, not str"
with pytest.raises(TypeError, match=msg):
ser.rolling(3).quantile("foo")
def test_rolling_std_1obs():
vals = Series([1.0, 2.0, 3.0, 4.0, 5.0])
result = vals.rolling(1, min_periods=1).std()
expected = Series([np.nan] * 5)
tm.assert_series_equal(result, expected)
result = vals.rolling(1, min_periods=1).std(ddof=0)
expected = Series([0.0] * 5)
tm.assert_series_equal(result, expected)
result = Series([np.nan, np.nan, 3, 4, 5]).rolling(3, min_periods=2).std()
assert np.isnan(result[2])
def test_rolling_std_neg_sqrt():
# unit test from Bottleneck
# Test move_nanstd for neg sqrt.
a = Series(
[
0.0011448196318903589,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
]
)
b = a.rolling(window=3).std()
assert np.isfinite(b[2:]).all()
b = a.ewm(span=3).std()
assert np.isfinite(b[2:]).all()
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | examples/cluster/plot_cluster_comparison.py | 246 | 4684 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that needs this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
onesuper/pandasticsearch | pandasticsearch/queries.py | 1 | 8168 | # -*- coding: UTF-8 -*-
try:
from collections.abc import MutableSequence
except ImportError:
from collections import MutableSequence
import json
import six
from pandasticsearch.errors import NoSuchDependencyException
class Query(MutableSequence):
def __init__(self):
super(Query, self).__init__()
self._values = None
self._result_dict = {}
self._took_millis = 0
def explain_result(self, result=None):
if isinstance(result, dict):
self._result_dict = result
self._took_millis = self._result_dict['took']
def to_pandas(self):
"""
Export the current query result to a Pandas DataFrame object.
"""
raise NotImplementedError('implemented in subclass')
def print_json(self):
indented_json = json.dumps(self._result_dict, sort_keys=True, separators=(',', ': '), indent=4,
ensure_ascii=False)
print(indented_json)
@property
def result(self):
return self._values
@property
def millis_taken(self):
return self._took_millis
@property
def json(self):
"""
Gets the original JSON representation returned by Elasticsearch REST API
:return: The JSON string indicating the query result
:rtype: string
"""
return json.dumps(self._result_dict)
def insert(self, index, value):
self._values.insert(index, value)
def append(self, value):
self._values.append(value)
def __str__(self):
return str(self._values)
def __len__(self):
return len(self._values)
def __delitem__(self, index):
del self._values[index]
def __setitem__(self, index, value):
self._values[index] = value
def __getitem__(self, index):
return self._values[index]
class Select(Query):
def __init__(self):
super(Select, self).__init__()
def resolve_fields(self, row):
fields = {}
for field in row:
nested_fields = {}
if isinstance(row[field], dict):
nested_fields = self.resolve_fields(row[field])
for n_field, val in nested_fields.items():
fields["{}.{}".format(field, n_field)] = val
else:
fields[field] = row[field]
return fields
def hit_to_row(self, hit):
row = {}
for k in hit.keys():
if k == '_source':
solved_fields = self.resolve_fields(hit['_source'])
row.update(solved_fields)
elif k.startswith('_'):
row[k] = hit[k]
return row
def explain_result(self, result=None):
super(Select, self).explain_result(result)
self._values = [self.hit_to_row(hit)
for hit in self._result_dict['hits']['hits']]
def to_pandas(self):
try:
import pandas
except ImportError:
raise NoSuchDependencyException('this method requires pandas library')
if self._values:
df = pandas.DataFrame(data=self._values)
return df
@staticmethod
def from_dict(d):
query = Select()
query.explain_result(d)
return query
@classmethod
def _stringfy_value(cls, value):
b = six.StringIO()
if value:
b.write(repr(value))
else:
b.write('(NULL)')
return b.getvalue()
def result_as_tabular(self, cols, n, truncate=20):
b = six.StringIO()
widths = []
tavnit = '|'
separator = '+'
cached_result = [kv for kv in self.result[:n]]
for col in cols:
maxlen = len(col)
for kv in cached_result:
if col in kv:
s = Select._stringfy_value(kv[col])
else:
s = '(NULL)'
if len(s) > maxlen:
maxlen = len(s)
widths.append(min(maxlen, truncate))
for w in widths:
tavnit += ' %-' + '%ss |' % (w,)
separator += '-' * w + '--+'
b.write(separator + '\n')
b.write(tavnit % tuple(cols) + '\n')
b.write(separator + '\n')
for kv in cached_result:
row = []
for col in cols:
if col in kv:
s = Select._stringfy_value(kv[col])
if len(s) > truncate:
s = s[:truncate - 3] + '...'
else:
s = '(NULL)'
row.append(s)
b.write(tavnit % tuple(row) + '\n')
b.write(separator + '\n')
return b.getvalue()
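# Illustrative sketch only (not part of the original library): builds a
# DataFrame from a hand-written, Elasticsearch-style response dict via
# Select.from_dict(); all field names below are made up for the example.
def _select_usage_example():
    result = {
        'took': 3,
        'hits': {'hits': [
            {'_id': '1', '_score': 1.0,
             '_source': {'user': {'name': 'alice'}, 'age': 30}},
            {'_id': '2', '_score': 1.0,
             '_source': {'user': {'name': 'bob'}, 'age': 40}},
        ]},
    }
    query = Select.from_dict(result)
    # nested '_source' fields are flattened with dots, e.g. 'user.name'
    return query.to_pandas()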
class ScrollSelect(Select):
"""
millis_taken/json not supported for ScrollSelect
"""
def __init__(self, hits_generator):
super(ScrollSelect, self).__init__()
self.hits_generator = hits_generator
@property
def result(self):
return [r for r in self.row_generator()]
def __str__(self):
return str(self.result)
def __len__(self):
return len(self.result)
def row_generator(self):
for hit in self.hits_generator():
yield self.hit_to_row(hit)
def to_pandas(self):
try:
import pandas
except ImportError:
raise NoSuchDependencyException('this method requires pandas library')
df = pandas.DataFrame(self.row_generator())
return df
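# Illustrative sketch only (not part of the original library): ScrollSelect
# takes a callable that yields raw hit dicts (e.g. collected from a scroll
# loop) and materialises rows lazily through row_generator().
def _scroll_select_usage_example():
    def fake_hits():
        # stand-in for a real scroll generator; field names are made up
        yield {'_id': '1', '_source': {'value': 10}}
        yield {'_id': '2', '_source': {'value': 20}}
    scroll = ScrollSelect(fake_hits)
    return scroll.to_pandas()  # DataFrame with columns '_id' and 'value'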
class Agg(Query):
def __init__(self):
super(Agg, self).__init__()
self._index_names = None
self._indexes = None
def explain_result(self, result=None):
super(Agg, self).explain_result(result)
tuples = list(Agg._process_agg(self._result_dict['aggregations']))
assert len(tuples) > 0
self._index_names = list(tuples[0][0])
self._values = []
self._indexes = []
for t in tuples:
_, index, row = t
self._values.append(row)
if len(index) > 0:
self._indexes.append(index)
@property
def index(self):
return self._indexes
def to_pandas(self):
try:
import pandas
except ImportError:
raise NoSuchDependencyException('this method requires pandas library')
if self._values is not None:
if len(self._indexes) > 0:
index = pandas.MultiIndex.from_tuples(self._indexes, names=self._index_names)
df = pandas.DataFrame(data=self._values, index=index)
else:
df = pandas.DataFrame(data=self._values)
return df
@classmethod
def _process_agg(cls, bucket, indexes=(), names=()):
"""
Recursively extract agg values
:param bucket: a bucket contains either sub-buckets or a bunch of aggregated values
:return: a list of tuples: (index_name, index_tuple, row)
"""
# for each agg, yield a row
row = {}
for k, v in bucket.items():
if isinstance(v, dict):
if 'buckets' in v:
for sub_bucket in v['buckets']:
if 'key_as_string' in sub_bucket:
key = sub_bucket['key_as_string']
else:
key = sub_bucket['key']
for x in Agg._process_agg(sub_bucket,
indexes + (key,),
names + (k,)):
yield x
elif 'value' in v:
row[k] = v['value']
elif 'values' in v: # percentiles
row = v['values']
else:
row.update(v) # stats
else:
if k == 'doc_count': # count docs
row['doc_count'] = v
if len(row) > 0:
yield (names, indexes, row)
@staticmethod
def from_dict(d):
agg = Agg()
agg.explain_result(d)
return agg
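# Illustrative sketch only (not part of the original library): a worked example
# of how _process_agg() walks a nested aggregation response; the bucket layout
# below is hypothetical but mirrors the shape the parser expects.
def _agg_usage_example():
    result = {
        'took': 5,
        'aggregations': {
            'per_color': {
                'buckets': [
                    {'key': 'red', 'doc_count': 3, 'avg_price': {'value': 9.5}},
                    {'key': 'blue', 'doc_count': 2, 'avg_price': {'value': 4.0}},
                ],
            },
        },
    }
    agg = Agg.from_dict(result)
    # each bucket becomes a row; the DataFrame index is built from the bucket
    # keys ('red', 'blue') under the index name 'per_color'
    return agg.to_pandas()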
| mit |
paulgclark/waveconverter | src/waveconverter_gui.py | 1 | 55783 | # This module contains the GUI code that interfaces with the glade xml file
import os
import waveConvertVars as wcv
from breakWave import basebandFileToList
from waveconverterEngine import decodeBaseband
from waveconverterEngine import packetsToFormattedString
from protocol_lib import ProtocolDefinition, getNextProtocolId
from protocol_lib import protocolSession
from protocol_lib import fetchProtocol
from waveConvertVars import protocol # NEED to eliminate
from waveconverterEngine import basebandTx
from waveconverterEngine import demodIQFile
from waveconverterEngine import buildTxList
from waveconverterEngine import decodeAllTx
from statEngine import computeStats
from statEngine import buildStatStrings
from statEngine import plugin_stats_stdout
from collections import Counter
import numpy as np
import webbrowser
from operator import itemgetter
from waveConvertVars import showAllTx
# for plotting baseband
try:
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from numpy import arange, pi, random, linspace
import matplotlib.cm as cm
from matplotlib.backends.backend_gtk3cairo import FigureCanvasGTK3Cairo as FigureCanvas
#from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
except:
print "The WaveConverter GUI requires matplotlib. Exiting..."
exit(1)
# require Gtk 3.0+ to work
try:
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Pango, cairo, GObject # note gtk+3 uses Gtk, not gtk
except:
print "The WaveConverter GUI requires GTK 3.0. Exiting..."
exit(1)
#from IPython.core.payload import PayloadManager
class TopWindow:
spinButtonPressed = -1
def on_window1_destroy(self, object, data=None):
if wcv.verbose:
print "quit with cancel"
Gtk.main_quit()
def on_gtk_quit_activate(self, menuitem, data=None):
if wcv.verbose:
print "quit from menu"
Gtk.main_quit()
def on_gtk_about_activate(self, menuitem, data=None):
if wcv.verbose:
print "help about selected"
self.response = self.aboutdialog.run()
self.aboutdialog.hide()
# This function grabs all of the entries that the user has made into the GUI
# and stores them in the appropriate global variable. This function is called before
# demodulating, decoding or computing stats
def transferGUIDataToGlobals(self):
wcv.center_freq = 1000000.0 * self.getFloatFromEntry("centerFreqEntry")
wcv.samp_rate = 1000000.0 * self.getFloatFromEntry("sampRateEntry")
wcv.timingError = self.getFloatFromEntry("unitTimingErrorEntry")/100.0
wcv.timeBetweenTx = self.getIntFromEntry("interPacketWidthEntry")
wcv.glitchFilterCount = self.getIntFromEntry("glitchFilterEntry")
wcv.protocol.convertTimingToSamples(wcv.basebandSampleRate)
# This function grabs all of the entries that the user has made into the GUI
# and stores them in the active protocol object. This function is called before
# saving or using any protocol
def transferGUIDataToProtocol(self):
## get all of the values entered on the demodulation tab
wcv.iqFileName = self.getStringFromEntry("iqFileNameEntry")
wcv.waveformFileName = self.getStringFromEntry("bbFileNameEntry")
wcv.center_freq = 1000000.0 * self.getFloatFromEntry("centerFreqEntry")
wcv.samp_rate = 1000000.0 * self.getFloatFromEntry("sampRateEntry")
wcv.protocol.modulation = self.getIntFromEntryBox("modulationEntryBox")
wcv.protocol.frequency = 1000000.0 * self.getFloatFromEntry("frequencyEntry")
wcv.protocol.frequencyHopList = self.getListFromEntry("frequencyHopListEntry")
wcv.protocol.channelWidth = 1000.0 * self.getFloatFromEntry("channelWidthEntry")
wcv.protocol.transitionWidth = 1000.0 * self.getFloatFromEntry("transitionWidthEntry")
wcv.protocol.threshold = self.getFloatFromEntry("thresholdEntry")
wcv.protocol.fskSquelchLeveldB = self.getFloatFromEntry("fskSquelchEntry")
# may not have an FSK deviation value entered if user is working with OOK
try:
wcv.protocol.fskDeviation = 1000.0 * self.getFloatFromEntry("fskDeviationEntry")
except:
wcv.protocol.fskDeviation = 0.0
# get baseband frequency from protocol
wcv.protocol.bb_samp_rate = 1000000.0 * self.getFloatFromEntry("bbSampRateEntry")
wcv.basebandSampleRate = wcv.protocol.bb_samp_rate
wcv.protocol.interPacketSymbol = self.getIntFromEntryBox("idleLevelEntryBox")
# get framing properties
wcv.protocol.preambleType = self.getIntFromEntryBox("preambleTypeEntryBox")
wcv.protocol.preambleSymbolLow = self.getIntFromEntry("preambleLowEntry")
wcv.protocol.preambleSymbolHigh = self.getIntFromEntry("preambleHighEntry")
# the preamble size is a list of possible values
wcv.protocol.preambleSize[0] = self.getIntFromEntry("preambleSize1Entry")
# there may not be a second value entered into the GUI
try:
wcv.protocol.preambleSize[1] = self.getIntFromEntry("preambleSize2Entry")
except:
wcv.protocol.preambleSize[1] = 0
wcv.protocol.headerWidth = self.getIntFromEntry("headerLengthEntry")
wcv.protocol.interPacketWidth = self.getIntFromEntry("interPacketWidthEntry")
wcv.protocol.arbPreambleList = self.getListFromEntry("arbitraryPreambleTimingEntry")
wcv.protocol.preamblePulseCount = self.getIntFromEntry("preamblePulseCountEntry")
# get decode properties
wcv.protocol.encodingType = self.getIntFromEntryBox("encodingEntryBox")
wcv.protocol.unitWidth = self.getIntFromEntry("payloadUnitEntry")
if self.getIntFromEntryBox("pwmOrderEntryBox") == 1:
wcv.protocol.pwmSymbolOrder01 = False
else:
wcv.protocol.pwmSymbolOrder01 = True
wcv.protocol.pwmZeroSymbol[0] = self.getIntFromEntry("pwmZeroLowEntry")
wcv.protocol.pwmZeroSymbol[1] = self.getIntFromEntry("pwmZeroHighEntry")
wcv.protocol.pwmOneSymbol[0] = self.getIntFromEntry("pwmOneLowEntry")
wcv.protocol.pwmOneSymbol[1] = self.getIntFromEntry("pwmOneHighEntry")
wcv.protocol.packetSize = self.getIntFromEntry("numPayloadBitsEntry")
# load CRC properties
wcv.protocol.crcPoly = self.getListFromEntry("crcPolynomialEntry")
for i in xrange(wcv.NUM_CRC):
wcv.protocol.crcAddr[i][0] = self.getIntFromEntry("crc" + str(i+1) + "StartAddrEntry")
wcv.protocol.crcAddr[i][1] = wcv.protocol.crcAddr[i][0] + len(wcv.protocol.crcPoly) - 2 # poly is one longer than actual range
wcv.protocol.crcData[i][0] = self.getIntFromEntry("crc" + str(i+1) + "DataLowEntry")
wcv.protocol.crcData[i][1] = self.getIntFromEntry("crc" + str(i+1) + "DataHighEntry")
wcv.protocol.crcInit = self.getIntFromEntry("crcInitEntry")
wcv.protocol.crcBitOrder = self.getIntFromEntryBox("crcReflectEntryBox")
wcv.protocol.crcReverseOut = self.getIntFromEntryBox("crcReverseOutEntryBox")
wcv.protocol.crcFinalXor = self.getListFromEntry("crcFinalXorEntry")
wcv.protocol.crcPad = self.getIntFromEntryBox("crcPadEntryBox")
wcv.protocol.crcPadCount = 8*self.getIntFromEntryBox("crcPadCountEntryBox")
# get ACS properties
wcv.protocol.acsLength = self.getIntFromEntry("acsBitLengthEntry")
wcv.protocol.acsInvertData = bool(self.getIntFromEntryBox("acsInvertEntryBox"))
for i in xrange(wcv.NUM_ACS):
wcv.protocol.acsInitSum[i] = self.getIntFromEntry("acs" + str(i+1) + "InitEntry")
wcv.protocol.acsAddr[i][0] = self.getIntFromEntry("acs" + str(i+1) + "AddrLowEntry")
wcv.protocol.acsAddr[i][1] = self.getIntFromEntry("acs" + str(i+1) + "AddrHighEntry")
wcv.protocol.acsData[i][0] = self.getIntFromEntry("acs" + str(i+1) + "DataLowEntry")
wcv.protocol.acsData[i][1] = self.getIntFromEntry("acs" + str(i+1) + "DataHighEntry")
# get stats properties
for i in xrange(wcv.NUM_ID_FIELDS):
wcv.protocol.idAddr[i][0] = self.getIntFromEntry("id"+str(i+1)+"AddrLowEntry")
wcv.protocol.idAddr[i][1] = self.getIntFromEntry("id"+str(i+1)+"AddrHighEntry")
for i in xrange(wcv.NUM_VAL_FIELDS):
wcv.protocol.valAddr[i][0] = self.getIntFromEntry("val"+str(i+1)+"AddrLowEntry")
wcv.protocol.valAddr[i][1] = self.getIntFromEntry("val"+str(i+1)+"AddrHighEntry")
# these parameters are currently unused but must be in protocol to keep sqllite happy
wcv.protocol.headerLevel = 0
wcv.protocol.preambleSync = False
wcv.protocol.crcPadVal = 0
# when we load new values for the protocol, we need to do the
# conversion from microseconds to samples
wcv.protocol.convertTimingToSamples(wcv.basebandSampleRate)
def on_loadProtocol_clicked(self, menuitem, data=None):
if wcv.verbose:
print "load protocol dialog started"
# generate liststore from protocols in database
protocolStore = Gtk.ListStore(int, str, str, str, str, str, int) # ID, type, make, model, year, modulation, freq
for proto in protocolSession.query(ProtocolDefinition):
# use strings to display modulation and device types
if proto.modulation == 0:
modString = "OOK"
else:
modString = "FSK"
devTypeString = wcv.devTypeStrings[proto.deviceType]
protocolStore.append([proto.protocolId,
devTypeString,
proto.deviceMake,
proto.deviceModel,
proto.deviceYear,
modString,
proto.frequency])
if wcv.verbose:
print "adding protocol to selection list: " + str(proto.protocolId) + \
" " + devTypeString + \
" " + proto.deviceMake + " " + proto.deviceModel + " " + proto.deviceYear + \
" " + modString + " " + str(proto.frequency)
self.protocolTreeView.set_model(protocolStore)
self.protocolName = self.protocolDialog.run()
self.protocolDialog.hide()
# when the user clicks on a protocol selection, store the index
# of the selection
def on_protocolTreeView_selection_changed(self, data=None):
try:
(model, pathlist) = self.protocolTreeSelection.get_selected_rows()
tree_iter = model.get_iter(pathlist[0])
self.currentProtocolDialogSelection = model.get_value(tree_iter,0)
except:
self.currentProtocolDialogSelection = 0 # first time through, the list will not exist
if wcv.verbose:
print "Current Selection: " + str(self.currentProtocolDialogSelection)
# when the user clicks the OK button, read the selected protocol from the
# database, then hide the dialog
def on_protocolDialogOKButton_clicked(self, data=None):
if wcv.verbose:
print "dialog OK clicked"
wcv.protocol = fetchProtocol(self.currentProtocolDialogSelection)
self.populateProtocolToGui(protocol)
self.protocolDialog.hide()
def on_protocolDialogCancelButton_clicked(self, data=None):
if wcv.verbose:
print "dialog cancel clicked"
self.protocolDialog.hide()
def on_saveProtocol_clicked(self, menuitem, data=None):
if wcv.verbose:
print "save protocol dialog started"
# pull in all the user-entered data and save to current protocol
self.transferGUIDataToProtocol()
# store protocol in database under current ID
wcv.protocol.saveProtocol()
if wcv.verbose:
print wcv.protocol.fullProtocolString()
# this function is called when the toolbar "save as" button is clicked,
# it brings up a dialog asking the user for a protocol name for the new
# protocol to be stored under
def on_saveAsProtocol_clicked(self, menuitem, data=None):
if wcv.verbose:
print "save as protocol dialog started"
# clear any existing text in dialog window (previously entered protocol info)
self.setEntry("protocolSaveAsDeviceMakeEntry", "")
self.setEntry("protocolSaveAsDeviceModelEntry", "")
self.setEntry("protocolSaveAsDeviceYearEntry", "")
self.setEntryBox("protocolSaveAsDeviceTypeEntryBox", -1)
# bring up "Save As" dialog
self.protocolSaveAsReturn = self.protocolSaveAsDialog.run()
self.protocolSaveAsDialog.hide()
# when the user clicks the OK button, read the selected protocol from the
# database, then hide the dialog
def on_protocolSaveAsOKButton_clicked(self, data=None):
if wcv.verbose:
print "SAVE-AS OK clicked"
# pull in all the user-entered data and save to current protocol, plus protocol name
wcv.protocol = ProtocolDefinition(getNextProtocolId())
self.transferGUIDataToProtocol()
wcv.protocol.deviceMake = self.getStringFromEntry("protocolSaveAsDeviceMakeEntry")
wcv.protocol.deviceModel = self.getStringFromEntry("protocolSaveAsDeviceModelEntry")
wcv.protocol.deviceYear = self.getIntFromEntry("protocolSaveAsDeviceYearEntry")
wcv.protocol.deviceType = self.getIntFromEntryBox("protocolSaveAsDeviceTypeEntryBox")
if wcv.verbose:
print wcv.protocol.fullProtocolString()
# store protocol in database under current ID
wcv.protocol.saveProtocol()
# NEED: check if name already exists, if it does, prompt for new name (loop until name is OK)
self.protocolSaveAsDialog.hide()
def on_protocolSaveAsCancelButton_clicked(self, data=None):
if wcv.verbose:
print "SAVE-AS cancel button clicked"
self.protocolSaveAsDialog.hide()
def on_gtk_rfFileOpen_activate(self, menuitem, data=None):
if wcv.verbose:
print "menu RF File Open"
self.fcd = Gtk.FileChooserDialog("Open IQ File...",
None,
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN,
Gtk.ResponseType.ACCEPT))
self.response = self.fcd.run()
if self.response == Gtk.ResponseType.ACCEPT:
print "Selected filepath: %s" % self.fcd.get_filename()
wcv.iqFileName = self.fcd.get_filename()
iqFileNameEntryWidget = self.builder.get_object("iqFileNameEntry")
Gtk.Entry.set_text(iqFileNameEntryWidget, str(wcv.iqFileName))
self.fcd.destroy()
def on_gtk_bbFileOpen_activate(self, menuitem, data=None):
if wcv.verbose:
print "menu BB File Open"
self.fcd = Gtk.FileChooserDialog("Open BB File...",
None,
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN,
Gtk.ResponseType.ACCEPT))
self.response = self.fcd.run()
if self.response == Gtk.ResponseType.ACCEPT:
print "Selected filepath: %s" % self.fcd.get_filename()
wcv.waveformFileName = self.fcd.get_filename()
bbFileNameEntryWidget = self.builder.get_object("bbFileNameEntry")
Gtk.Entry.set_text(bbFileNameEntryWidget, str(wcv.waveformFileName))
self.fcd.destroy()
def on_userGuideMenu_activate(self, menuitem, data=None):
if wcv.verbose:
print "opening user guide..."
        # get path of the doc directory (assuming this is running in the src subdir of the install directory)
os.chdir('../doc')
docPath = os.getcwd()
webbrowser.open('file://' + docPath + '/user_guide.pdf')
def changeTxNumberToView(self, txNum):
wcv.tMin = 0
wcv.tMax = 100
if wcv.verbose:
print "Selecting TX #" + str(txNum)
if txNum < len(wcv.txList):
wcv.txNum = txNum
self.drawBasebandPlot(wcv.txList[wcv.txNum].waveformData,
wcv.tMin, wcv.tMax, wcv.basebandSampleRate)
else:
print "Reached end of transmission list"
def on_transmissionNumberSelect2_value_changed(self, spinButton, data=None):
# find out if this was the button pressed
if self.spinButtonPressed == -1:
self.spinButtonPressed = 2
txNum = spinButton.get_value_as_int() - 1 # button counts from 1 to n; array from 0 to n-1
# first update the other two widgets in other tabs
self.setSpinButtonValue("transmissionNumberSelect", txNum + 1)
self.setSpinButtonValue("transmissionNumberSelect1", txNum + 1)
# then change the view
self.changeTxNumberToView(txNum)
# now return control
self.spinButtonPressed = -1
def on_transmissionNumberSelect1_value_changed(self, spinButton, data=None):
# find out if this was the button pressed
if self.spinButtonPressed == -1:
self.spinButtonPressed = 1
txNum = spinButton.get_value_as_int() - 1 # button counts from 1 to n; array from 0 to n-1
# first update the other two widgets in other tabs
self.setSpinButtonValue("transmissionNumberSelect", txNum + 1)
self.setSpinButtonValue("transmissionNumberSelect2", txNum + 1)
# then change the view
self.changeTxNumberToView(txNum)
# now return control
self.spinButtonPressed = -1
def on_transmissionNumberSelect_value_changed(self, spinButton, data=None):
# find out if this was the button pressed
if self.spinButtonPressed == -1:
self.spinButtonPressed = 0
txNum = spinButton.get_value_as_int() - 1 # button counts from 1 to n; array from 0 to n-1
# first update the other two widgets in other tabs
self.setSpinButtonValue("transmissionNumberSelect1", txNum + 1)
self.setSpinButtonValue("transmissionNumberSelect2", txNum + 1)
self.changeTxNumberToView(txNum)
# now return control
self.spinButtonPressed = -1
def on_panRightButton_clicked(self, button, data=None):
if wcv.verboseZoom:
print "Panning Right"
# get center point of current plot
center = (wcv.tMax + wcv.tMin)/2.0
# get current zoom size
zoomSize = 1.0*(wcv.tMax - wcv.tMin)
# change center by increasing it to midway between current center
# and right-most extent
center += zoomSize/4
# update extents and redraw
wcv.tMin = (center - zoomSize/2.0) # int((center - zoomSize/2.0) + 0.5)
wcv.tMax = (center + zoomSize/2.0) # int((center + zoomSize/2.0) + 0.5)
# trap for panning right past max extent
if wcv.tMax > 100:
wcv.tMax = 100
wcv.tMin = 100 - zoomSize
self.drawBasebandPlot(wcv.txList[wcv.txNum].waveformData,
wcv.tMin, wcv.tMax, wcv.basebandSampleRate)
def on_panLeftButton_clicked(self, button, data=None):
if wcv.verboseZoom:
print "Panning Left"
# get center point of current plot
center = (wcv.tMax + wcv.tMin)/2.0
# get current zoom size
zoomSize = 1.0*(wcv.tMax - wcv.tMin)
# change center by decreasing it to midway between current center
# and left-most extent
center -= zoomSize/4
# update extents and redraw
wcv.tMin = (center - zoomSize/2.0) # int((center - zoomSize/2.0) + 0.5)
wcv.tMax = (center + zoomSize/2.0) # int((center + zoomSize/2.0) + 0.5)
# trap for panning left past min extent
if wcv.tMin < 0:
wcv.tMin = 0
wcv.tMax = zoomSize
self.drawBasebandPlot(wcv.txList[wcv.txNum].waveformData,
wcv.tMin, wcv.tMax, wcv.basebandSampleRate)
def on_zoomFullButton_clicked(self, button, data=None):
if wcv.verboseZoom:
print "Zooming Out Full"
wcv.tMin = 0.0 # 0
wcv.tMax = 100.0 # 100
self.drawBasebandPlot(wcv.txList[wcv.txNum].waveformData,
wcv.tMin, wcv.tMax, wcv.basebandSampleRate)
def on_zoomInButton_clicked(self, button, data=None):
if wcv.verboseZoom:
print "Zooming In"
# get center point of current plot
center = (wcv.tMax + wcv.tMin)/2.0
# get current zoom size and cut in half
zoomSize = (wcv.tMax - wcv.tMin)/2.0
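        # e.g. a current view of [20, 60] has center 40.0; halving the span to
        # 20.0 and re-centering gives the new view [30, 50]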
wcv.tMin = (center - zoomSize/2.0) # int((center - zoomSize/2.0) + 0.5)
wcv.tMax = (center + zoomSize/2.0) # int((center + zoomSize/2.0) + 0.5)
self.drawBasebandPlot(wcv.txList[wcv.txNum].waveformData,
wcv.tMin, wcv.tMax, wcv.basebandSampleRate)
def on_zoomOutButton_clicked(self, button, data=None):
if wcv.verboseZoom:
print "Zooming Out"
# get center point of current plot
center = (wcv.tMax + wcv.tMin)/2.0
# get current zoom size and double
zoomSize = (wcv.tMax - wcv.tMin)*2.0
if wcv.verboseZoom:
print "center: " + str(center)
print "zoomSize: " + str(zoomSize)
wcv.tMin = (center - zoomSize/2.0) # int((center - zoomSize/2.0) + 0.5)
wcv.tMax = (center + zoomSize/2.0) # int((center + zoomSize/2.0) + 0.5)
if wcv.verbose:
print "tMin: " + str(wcv.tMin)
print "tMax: " + str(wcv.tMax)
# trap for zoom out past max extent
if wcv.tMin < 0:
wcv.tMin = 0.0 # 0
if wcv.tMax > 100:
wcv.tMax = 100.0 # 100
self.drawBasebandPlot(wcv.txList[wcv.txNum].waveformData,
wcv.tMin, wcv.tMax, wcv.basebandSampleRate)
#
def on_showAllButton_toggled(self, button, data=None):
wcv.showAllTx = self.getBoolFromToolToggle("showAllButton")
if wcv.verboseZoom:
print "View Stats for All TX changed to " + str(wcv.showAllTx)
# if stats are up, redo them
if not wcv.bitProbString == "":
self.on_runStat_clicked(button)
def on_hexButton_toggled(self, button, data=None):
wcv.outputHex = self.getBoolFromToolToggle("hexButton")
if wcv.verbose:
print "Hex Output Mode change to " + str(wcv.outputHex)
        # if the tx display is up, redo it
if len(wcv.txList) != 0:
self.on_Decode_clicked(button)
# if stats are up, redo them
if not wcv.bitProbString == "":
self.on_runStat_clicked(button)
# the following functions grab values from the GUI widgets, consolidating the two lines
# of code into one. Each function takes the text name of the widget and returns its current
    # value. The corresponding setter functions below assign a value to the named widget.
def getIntFromEntry(self, widgetName):
tempWidget = self.builder.get_object(widgetName)
return int(tempWidget.get_text())
def getFloatFromEntry(self, widgetName):
tempWidget = self.builder.get_object(widgetName)
return float(tempWidget.get_text())
def getStringFromEntry(self, widgetName):
tempWidget = self.builder.get_object(widgetName)
return str(tempWidget.get_text())
def getIntFromEntryBox(self, widgetName):
tempWidget = self.builder.get_object(widgetName)
return int(tempWidget.get_active())
def getBoolFromToolToggle(self, widgetName):
tempWidget = self.builder.get_object(widgetName)
return tempWidget.get_active()
def getBoolFromEntryBox(self, widgetName):
tempWidget = self.builder.get_object(widgetName)
intVal = int(tempWidget.get_active())
if intVal == 0:
return(False)
else:
return(True)
def getListFromEntry(self, widgetName):
tempWidget = self.builder.get_object(widgetName)
listString = tempWidget.get_text().strip('[]') # resolves to string of comma-separated values
listItemsText = listString.split(',')
tempList = []
# first check if we have an empty list
if not listItemsText or listItemsText == ['']:
return []
# otherwise build the list and return it
for item in listItemsText:
tempList.append(int(item))
return tempList
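    # example: an entry containing "[10, 20, 30]" (with or without the square
    # brackets) is parsed to the list [10, 20, 30]; an empty entry returns []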
#def setIntToEntry(self, widgetName, value):
# tempWidget = self.builder.get_object(widgetName)
# Gtk.Entry.set_text(tempWidget, str(value))
#def setFloatToEntry(self, widgetName, value):
# tempWidget = self.builder.get_object(widgetName)
# Gtk.Entry.set_text(tempWidget, str(value))
def setEntry(self, widgetName, value):
tempWidget = self.builder.get_object(widgetName)
Gtk.Entry.set_text(tempWidget, str(value))
def setEntryBox(self, widgetName, value):
tempWidget = self.builder.get_object(widgetName)
Gtk.ComboBox.set_active(tempWidget, value)
def setLabel(self, widgetName, value, style = 0):
tempWidget = self.builder.get_object(widgetName)
if style == 1:
Gtk.Label.set_markup(tempWidget, value)
else:
Gtk.Label.set_text(tempWidget, value)
def setSpinButtonValue(self, widgetName, value):
tempWidget = self.builder.get_object(widgetName)
Gtk.SpinButton.set_value(tempWidget, value)
#def setIntToEntryBox(self, widgetName, value):
# tempWidget = self.builder.get_object(widgetName)
# Gtk.ComboBox.set_active(tempWidget, value)
#def setListToEntry(self, widgetName, value):
# tempWidget = self.builder.get_object(widgetName)
# print "fill"
def deactivateEntry(self, widgetName):
tempWidget = self.builder.get_object(widgetName)
tempWidget.set_sensitive(False)
def activateEntry(self, widgetName):
tempWidget = self.builder.get_object(widgetName)
tempWidget.set_sensitive(True)
# plots the input list as a waveform, displaying the data between
# tMin and tMax, which are percentages of the duration of the waveform. For example,
# 0, 100 shows the entire waveform (0% to 100%), while 25, 75 shows the middle half;
# waveformSampleRate provides the timing info to compute the horizontal axis
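    # worked example: a 200 ms waveform sampled at 100 kHz holds 20000 samples;
    # tMin=25, tMax=75 selects samples 5000..15000, i.e. the 50 ms to 150 ms
    # window of the plot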
def drawBasebandPlot(self, waveformDataList, tMin, tMax, waveformSampleRate):
# decimate large input lists; cairo can only handle 18980 point plots
if len(waveformDataList) > 18000:
# NEED to replace this with decimated waveform, not truncated
if wcv.verboseZoom:
print "Baseband waveform longer than 18k samples, decimating..."
decimationFactor = 1 + int(len(waveformDataList)/18000)
localWaveform = waveformDataList[::decimationFactor]
else:
localWaveform = list(waveformDataList) # make local copy
decimationFactor = 1
# for use in building the horizontal axis labels
waveformLength = len(waveformDataList)
startIndex = int((tMin/100.0) * waveformLength)
stopIndex = int((tMax/100.0) * waveformLength)
# for operating on the actual plot data that's been decimated above
waveformLengthDecimated = len(localWaveform)
startIndexDecimated = int((tMin/100.0) * waveformLengthDecimated)
stopIndexDecimated = int((tMax/100.0) * waveformLengthDecimated)
# compute plot area in milliseconds
stepSize = (1/waveformSampleRate) * 1000.0
startTime = startIndex * stepSize
stopTime = stopIndex * stepSize
if wcv.verboseZoom:
print "displaying new plot"
print "list size = " + str(waveformLength)
print "tMin(%) = " + str(tMin)
print "tMax(%) = " + str(tMax)
print "start Index = " + str(startIndexDecimated)
print "stop Index = " + str(stopIndexDecimated)
print "start time = " + str(startTime)
print "stop time = " + str(stopTime)
t = arange(startTime, stopTime, stepSize*decimationFactor)
s = localWaveform[startIndexDecimated:stopIndexDecimated]
# sometimes t and s arrays are sized differently, probably due to rounding
minSize = min(len(t), len(s))
if len(t) != minSize:
t = t[0:minSize]
if len(s) != minSize:
s = s[0:minSize]
if wcv.verboseZoom:
print "length of waveform list: " + str(len(s))
print "step size: " + str(stepSize)
print "length of time vector: " + str(len(t))
self.axis.clear() # clear plot before re-plotting
self.axis.plot(t,s)
self.axis.grid(True)
self.axis.axis([startTime, stopTime, -0.1, 1.1])
self.axis.set_xlabel('time (ms)') # replace with time unit
self.axis.set_ylabel('Baseband Signal') # replace with ???
self.canvas.draw()
self.canvas1.draw()
self.canvas2.draw()
# when the RF Demod button is pushed, we need to take all the settings
# that the user has entered into the gui and place them in the current
# protocol object; we then call a flowgraph object with the RF
# configuration specified
def on_Demodulate_clicked(self, button, data=None):
print "pushed RF Demod Button"
# get values from GUI
self.transferGUIDataToGlobals()
self.transferGUIDataToProtocol()
# if we have a baseband file name, use it to get the bb data
if len(wcv.waveformFileName) > 0:
wcv.basebandData = basebandFileToList(wcv.waveformFileName)
elif len(wcv.iqFileName) > 0:
wcv.basebandData = demodIQFile(verbose = wcv.verbose,
modulationType = wcv.protocol.modulation,
iqSampleRate = wcv.samp_rate,
basebandSampleRate = wcv.basebandSampleRate,
centerFreq = wcv.center_freq,
frequency = wcv.protocol.frequency,
frequencyHopList = wcv.protocol.frequencyHopList,
channelWidth = wcv.protocol.channelWidth,
transitionWidth = wcv.protocol.transitionWidth,
threshold = wcv.protocol.threshold,
fskSquelch = wcv.protocol.fskSquelchLeveldB,
fskDeviation = wcv.protocol.fskDeviation,
iqFileName = wcv.iqFileName,
waveformFileName = ""
)
else:
print "No IQ or baseband file given"
return 0
# read baseband waveform data from file
if wcv.verbose:
print "baseband data length (raw): " + str(len(wcv.basebandData))
# split the baseband into individual transmissions and then store each
# in its own transmission list, to be decoded later
wcv.txList = buildTxList(basebandData = wcv.basebandData,
basebandSampleRate = wcv.basebandSampleRate,
interTxTiming = wcv.protocol.interPacketWidth_samp,
glitchFilterCount = wcv.glitchFilterCount,
interTxLevel = wcv.protocol.interPacketSymbol,
verbose = wcv.verbose)
# debug only
if wcv.verbose:
print "Number of transmissions broken down: " + str(len(wcv.txList))
for tx in wcv.txList:
print "tx waveform list length: " + str(len(tx.waveformData))
if len(wcv.txList) == 0:
self.setLabel("signalCountLabel", "<b>NO SIGNALS FOUND</b>", 1) # NEED: use bold and/or red text?
self.setLabel("signalCountLabel1", "<b>NO SIGNALS FOUND</b>", 1)
self.setLabel("signalCountLabel2", "<b>NO SIGNALS FOUND</b>", 1)
print "NO SIGNALS FOUND AFTER DEMODULATION"
return(1)
else:
self.setLabel("signalCountLabel", "Signals Found: " + str(len(wcv.txList)))
self.setLabel("signalCountLabel1", "Signals Found: " + str(len(wcv.txList)))
self.setLabel("signalCountLabel2", "Signals Found: " + str(len(wcv.txList)))
# now plot the first transmission, zoomed out
wcv.tMin = 0
wcv.tMax = 100
#if wcv.verbose:
# print "txListLength: " + str(len(wcv.txList[0].waveformData)) + " tMin/Max: " + str(wcv.tMin) + " " + str(wcv.tMax) + " bbsr: " + str(wcv.basebandSampleRate)
# print wcv.txList[0].waveformData
self.drawBasebandPlot(wcv.txList[0].waveformData, wcv.tMin, wcv.tMax, wcv.basebandSampleRate)
# set range for the tx-select spin button
self.txSelectSpinButton.set_range(1, len(wcv.txList)+1)
self.txSelectSpinButton1.set_range(1, len(wcv.txList)+1)
self.txSelectSpinButton2.set_range(1, len(wcv.txList)+1)
# update the transmission status
if wcv.verbose:
print "Baseband separated into individual transmissions."
def on_modulationEntryBox_changed(self, data=None):
wcv.protocol.modulation = self.getBoolFromEntryBox("modulationEntryBox")
if wcv.protocol.modulation == wcv.MOD_OOK:
self.activateEntry("thresholdEntry")
self.deactivateEntry("fskDeviationEntry")
self.deactivateEntry("fskSquelchEntry")
elif wcv.protocol.modulation == wcv.MOD_FSK:
self.deactivateEntry("thresholdEntry")
self.activateEntry("fskDeviationEntry")
self.activateEntry("fskSquelchEntry")
def on_encodingEntryBox_changed(self, data=None):
wcv.protocol.encodingType = self.getIntFromEntryBox("encodingEntryBox")
# if one of the Manchester types, deactivate the PWM entry boxes and activate the unit entry
if wcv.protocol.encodingType == wcv.STD_MANCHESTER or wcv.protocol.encodingType == wcv.INV_MANCHESTER:
self.activateEntry("payloadUnitEntry")
self.deactivateEntry("pwmZeroLowEntry")
self.deactivateEntry("pwmZeroHighEntry")
self.deactivateEntry("pwmOneLowEntry")
self.deactivateEntry("pwmOneHighEntry")
        # for the NO ENCODING state, only the zero-low and one-high symbol timing entries are used
elif wcv.protocol.encodingType == wcv.NO_ENCODING:
self.deactivateEntry("payloadUnitEntry")
self.activateEntry("pwmZeroLowEntry")
self.deactivateEntry("pwmZeroHighEntry")
self.deactivateEntry("pwmOneLowEntry")
self.activateEntry("pwmOneHighEntry")
# if PWM/PIE, deactivate unit entry boxes and activate the PWM entries
else:
self.deactivateEntry("payloadUnitEntry")
self.activateEntry("pwmZeroLowEntry")
self.activateEntry("pwmZeroHighEntry")
self.activateEntry("pwmOneLowEntry")
self.activateEntry("pwmOneHighEntry")
    # handle a change in the preamble type box and gray out all the unused properties
def on_preambleTypeEntryBox_changed(self, data=None):
wcv.protocol.preambleType = self.getIntFromEntryBox("preambleTypeEntryBox")
# if we are doing a regular preamble, then gray out the arbitrary entry
if wcv.protocol.preambleType == wcv.PREAMBLE_REG:
self.activateEntry("preambleLowEntry")
self.activateEntry("preambleHighEntry")
self.activateEntry("preambleSize1Entry")
self.activateEntry("preambleSize2Entry")
self.activateEntry("headerLengthEntry")
self.deactivateEntry("arbitraryPreambleTimingEntry")
self.deactivateEntry("preamblePulseCountEntry")
# else gray out everything but the arbitrary entry
elif wcv.protocol.preambleType == wcv.PREAMBLE_ARB:
self.deactivateEntry("preambleLowEntry")
self.deactivateEntry("preambleHighEntry")
self.deactivateEntry("preambleSize1Entry")
self.deactivateEntry("preambleSize2Entry")
self.deactivateEntry("headerLengthEntry")
self.activateEntry("arbitraryPreambleTimingEntry")
self.deactivateEntry("preamblePulseCountEntry")
else:
self.deactivateEntry("preambleLowEntry")
self.deactivateEntry("preambleHighEntry")
self.deactivateEntry("preambleSize1Entry")
self.deactivateEntry("preambleSize2Entry")
self.deactivateEntry("headerLengthEntry")
self.deactivateEntry("arbitraryPreambleTimingEntry")
self.activateEntry("preamblePulseCountEntry")
# when the Decode button is pushed, we need to take all the settings
# that the user has entered into the gui and place them in the current
# protocol object; we then call the decoder engine to extract the payload
def on_Decode_clicked(self, button, data=None):
if wcv.verbose:
print "Now Decoding Baseband"
# get values from GUI
self.transferGUIDataToGlobals()
self.transferGUIDataToProtocol()
if wcv.verbose:
print "baseband sample rate:" + str(wcv.basebandSampleRate)
print wcv.protocol.fullProtocolString()
print "tx list length: " + str(len(wcv.txList))
(wcv.txList, wcv.decodeOutputString) = decodeAllTx(protocol = wcv.protocol,
txList = wcv.txList,
outputHex = wcv.outputHex,
timingError = wcv.timingError,
glitchFilterCount = wcv.glitchFilterCount,
verbose = wcv.verbose,
showAllTx = wcv.showAllTx)
# update the display of tx valid flags
interPacketValidCount = 0
preambleValidCount = 0
headerValidCount = 0
encodingValidCount = 0
crcValidCount = 0
txValidCount = 0
for iTx in wcv.txList:
interPacketValidCount += iTx.interPacketTimingValid
preambleValidCount += iTx.preambleValid
headerValidCount += iTx.headerValid
encodingValidCount += iTx.encodingValid
crcValidCount += iTx.crcValid
txValidCount += iTx.txValid
numTx = len(wcv.txList)
if len(wcv.protocol.crcPoly) <= 0:
crcStringOut = "N/A"
else:
crcStringOut = str(crcValidCount) + "/" + str(numTx)
self.setLabel("guiGoodPackets1", str(txValidCount) + "/" + str(numTx))
self.setLabel("guiPreambleMatches1", str(preambleValidCount) + "/" + str(numTx))
self.setLabel("guiEncodingValid1", str(encodingValidCount) + "/" + str(numTx))
self.setLabel("guiCrcPass1", crcStringOut)
self.setLabel("guiGoodPackets2", str(txValidCount) + "/" + str(numTx))
self.setLabel("guiPreambleMatches2", str(preambleValidCount) + "/" + str(numTx))
self.setLabel("guiEncodingValid2", str(encodingValidCount) + "/" + str(numTx))
self.setLabel("guiCrcPass2", crcStringOut)
self.setLabel("guiGoodPackets3", str(txValidCount) + "/" + str(numTx))
self.setLabel("guiPreambleMatches3", str(preambleValidCount) + "/" + str(numTx))
self.setLabel("guiEncodingValid3", str(encodingValidCount) + "/" + str(numTx))
self.setLabel("guiCrcPass3", crcStringOut)
self.setLabel("guiGoodPackets4", str(txValidCount) + "/" + str(numTx))
self.setLabel("guiPreambleMatches4", str(preambleValidCount) + "/" + str(numTx))
self.setLabel("guiEncodingValid4", str(encodingValidCount) + "/" + str(numTx))
self.setLabel("guiCrcPass4", crcStringOut)
# change the text in all windows (NEED a framed approach)
self.decodeTextViewWidget1 = self.builder.get_object("decodeTextView1")
self.decodeTextViewWidget1.modify_font(Pango.font_description_from_string('Courier 12'))
self.decodeTextViewWidget1.get_buffer().set_text(wcv.decodeOutputString)
self.decodeTextViewWidget2 = self.builder.get_object("decodeTextView2")
self.decodeTextViewWidget2.modify_font(Pango.font_description_from_string('Courier 12'))
self.decodeTextViewWidget2.get_buffer().set_text(wcv.decodeOutputString)
self.decodeTextViewWidget3 = self.builder.get_object("decodeTextView3")
self.decodeTextViewWidget3.modify_font(Pango.font_description_from_string('Courier 12'))
self.decodeTextViewWidget3.get_buffer().set_text(wcv.decodeOutputString)
def on_runStat_clicked(self, button, data=None):
if wcv.verbose:
print "Now Computing Payload Statistics..."
# get values from GUI
self.transferGUIDataToGlobals()
self.transferGUIDataToProtocol()
(wcv.bitProbList, idListMaster, valListMaster, payloadLenList) = computeStats(txList = wcv.txList,
protocol = wcv.protocol,
showAllTx = wcv.showAllTx)
# experimental new feature
#from statEngine import computeUavStats
plugin_stats_stdout(txList = wcv.txList,
protocol = wcv.protocol,
showAllTx = wcv.showAllTx)
"""
uavValList = computeUavStats(txList = wcv.txList,
protocol = wcv.protocol,
showAllTx = wcv.showAllTx)
"""
(wcv.bitProbString, idStatString, valuesString) = buildStatStrings(bitProbList = wcv.bitProbList,
idListMaster = idListMaster,
valListMaster = valListMaster,
payloadLenList = payloadLenList,
outputHex = wcv.outputHex)
# display bit probabilities in correct GUI element
self.bitProbTextViewWidget = self.builder.get_object("bitProbTextView")
#self.bitProbTextViewWidget.modify_font(Pango.font_description_from_string('Courier 8'))
self.bitProbTextViewWidget.get_buffer().set_text(wcv.bitProbString)
# display ID frequency data
self.idValuesTextViewWidget = self.builder.get_object("idValuesTextView")
#self.idValuesTextViewWidget.modify_font(Pango.font_description_from_string('Courier 8'))
self.idValuesTextViewWidget.get_buffer().set_text(idStatString)
# need to add values 2 and 3 (or make into a list)
### print value ranges
self.idValuesTextViewWidget = self.builder.get_object("fieldValuesTextView")
#self.idValuesTextViewWidget.modify_font(Pango.font_description_from_string('Courier 8'))
self.idValuesTextViewWidget.get_buffer().set_text(valuesString)
# when a new protocol is loaded, we use its information to populate GUI
def populateProtocolToGui(self, protocol):
# add global WC control values
self.setEntry("centerFreqEntry", wcv.center_freq/1000000.0)
self.setEntry("sampRateEntry", wcv.samp_rate/1000000.0)
self.setEntry("glitchFilterEntry", wcv.glitchFilterCount)
self.setEntry("unitTimingErrorEntry", wcv.timingError*100.0)
# add RF properties
self.setEntry("iqFileNameEntry", wcv.iqFileName)
self.setEntry("bbFileNameEntry", wcv.waveformFileName)
self.setEntry("frequencyEntry", wcv.protocol.frequency/1000000.0)
self.setEntry("frequencyHopListEntry", wcv.protocol.frequencyHopList)
self.setEntry("channelWidthEntry", wcv.protocol.channelWidth/1000.0)
self.setEntry("transitionWidthEntry", wcv.protocol.transitionWidth/1000.0)
self.setEntryBox("modulationEntryBox", wcv.protocol.modulation)
self.setEntry("fskDeviationEntry", wcv.protocol.fskDeviation/1000.0)
self.setEntry("thresholdEntry", wcv.protocol.threshold)
self.setEntry("fskSquelchEntry", wcv.protocol.fskSquelchLeveldB)
self.setEntry("bbSampRateEntry", wcv.protocol.bb_samp_rate/1000000.0)
self.setEntryBox("idleLevelEntryBox", wcv.protocol.interPacketSymbol)
# add preamble properties
self.setEntryBox("preambleTypeEntryBox", wcv.protocol.preambleType)
self.setEntry("preambleLowEntry", wcv.protocol.preambleSymbolLow)
self.setEntry("preambleHighEntry", wcv.protocol.preambleSymbolHigh)
self.setEntry("preambleSize1Entry", int(wcv.protocol.preambleSize[0]))
self.setEntry("preambleSize2Entry", int(wcv.protocol.preambleSize[1]))
self.setEntry("headerLengthEntry", wcv.protocol.headerWidth)
self.setEntry("interPacketWidthEntry", wcv.protocol.interPacketWidth)
self.setEntry("arbitraryPreambleTimingEntry", wcv.protocol.arbPreambleList)
self.setEntry("preamblePulseCountEntry" , wcv.protocol.preamblePulseCount)
# add payload properties
self.setEntryBox("encodingEntryBox", wcv.protocol.encodingType)
self.setEntry("payloadUnitEntry", wcv.protocol.unitWidth)
if wcv.protocol.pwmSymbolOrder01:
self.setEntryBox("pwmOrderEntryBox", 0)
else:
self.setEntryBox("pwmOrderEntryBox", 1)
self.setEntry("pwmZeroLowEntry", wcv.protocol.pwmZeroSymbol[0])
self.setEntry("pwmZeroHighEntry", wcv.protocol.pwmZeroSymbol[1])
self.setEntry("pwmOneLowEntry", wcv.protocol.pwmOneSymbol[0])
self.setEntry("pwmOneHighEntry", wcv.protocol.pwmOneSymbol[1])
self.setEntry("numPayloadBitsEntry", wcv.protocol.packetSize)
# add CRC properties
self.setEntry("crcLengthEntry", len(wcv.protocol.crcPoly))
for i in xrange(wcv.NUM_CRC):
self.setEntry("crc" + str(i+1) + "StartAddrEntry", wcv.protocol.crcAddr[i][0])
self.setEntry("crc" + str(i+1) + "DataLowEntry", wcv.protocol.crcData[i][0])
self.setEntry("crc" + str(i+1) + "DataHighEntry", wcv.protocol.crcData[i][1])
self.setEntry("crcPolynomialEntry", wcv.protocol.crcPoly)
self.setEntry("crcInitEntry", wcv.protocol.crcInit)
self.setEntryBox("crcReflectEntryBox", wcv.protocol.crcBitOrder)
self.setEntryBox("crcReverseOutEntryBox", wcv.protocol.crcReverseOut)
self.setEntry("crcFinalXorEntry", wcv.protocol.crcFinalXor)
self.setEntryBox("crcPadEntryBox", wcv.protocol.crcPad)
self.setEntryBox("crcPadCountEntryBox", wcv.protocol.crcPadCount/8)
# add the ACS properties
self.setEntry("acsBitLengthEntry", wcv.protocol.acsLength)
self.setEntryBox("acsInvertEntryBox", int(wcv.protocol.acsInvertData))
for i in xrange(wcv.NUM_ACS):
self.setEntry("acs" + str(i+1) + "InitEntry", wcv.protocol.acsInitSum[i])
self.setEntry("acs" + str(i+1) + "AddrLowEntry", wcv.protocol.acsAddr[i][0])
self.setEntry("acs" + str(i+1) + "AddrHighEntry", wcv.protocol.acsAddr[i][1])
self.setEntry("acs" + str(i+1) + "DataLowEntry", wcv.protocol.acsData[i][0])
self.setEntry("acs" + str(i+1) + "DataHighEntry", wcv.protocol.acsData[i][1])
# add payload statistics properties
self.setEntry("id1AddrLowEntry", wcv.protocol.idAddr[0][0])
self.setEntry("id1AddrHighEntry", wcv.protocol.idAddr[0][1])
self.setEntry("id2AddrLowEntry", wcv.protocol.idAddr[1][0])
self.setEntry("id2AddrHighEntry", wcv.protocol.idAddr[1][1])
self.setEntry("id3AddrLowEntry", wcv.protocol.idAddr[2][0])
self.setEntry("id3AddrHighEntry", wcv.protocol.idAddr[2][1])
self.setEntry("id4AddrLowEntry", wcv.protocol.idAddr[3][0])
self.setEntry("id4AddrHighEntry", wcv.protocol.idAddr[3][1])
self.setEntry("id5AddrLowEntry", wcv.protocol.idAddr[4][0])
self.setEntry("id5AddrHighEntry", wcv.protocol.idAddr[4][1])
self.setEntry("id6AddrLowEntry", wcv.protocol.idAddr[5][0])
self.setEntry("id6AddrHighEntry", wcv.protocol.idAddr[5][1])
self.setEntry("val1AddrLowEntry", wcv.protocol.valAddr[0][0])
self.setEntry("val1AddrHighEntry", wcv.protocol.valAddr[0][1])
self.setEntry("val2AddrLowEntry", wcv.protocol.valAddr[1][0])
self.setEntry("val2AddrHighEntry", wcv.protocol.valAddr[1][1])
self.setEntry("val3AddrLowEntry", wcv.protocol.valAddr[2][0])
self.setEntry("val3AddrHighEntry", wcv.protocol.valAddr[2][1])
def __init__(self, protocol):
#global protocol
self.gladefile = "gui/top_level.glade"
self.builder = Gtk.Builder()
self.builder.add_from_file(self.gladefile)
self.builder.connect_signals(self)
# required setup for spin button range
self.txSelectSpinButton = self.builder.get_object("transmissionNumberSelect")
self.spinButtonAdjustment = Gtk.Adjustment(0, 0, 0, 1, 1, 1)
self.txSelectSpinButton.set_adjustment(self.spinButtonAdjustment)
#
self.txSelectSpinButton1 = self.builder.get_object("transmissionNumberSelect1")
self.spinButtonAdjustment1 = Gtk.Adjustment(0, 0, 0, 1, 1, 1)
self.txSelectSpinButton1.set_adjustment(self.spinButtonAdjustment1)
#
self.txSelectSpinButton2 = self.builder.get_object("transmissionNumberSelect2")
self.spinButtonAdjustment2 = Gtk.Adjustment(0, 0, 0, 1, 1, 1)
self.txSelectSpinButton2.set_adjustment(self.spinButtonAdjustment2)
# setup axis, canvas and figure
self.figure = Figure(figsize=(8,6), dpi=71) # replace with ???
self.axis = self.figure.add_subplot(111)
t = arange(0, 100, 1)
s = arange(0, 100, 1)
self.axis.plot(t,s)
self.axis.axis([0, 100, -0.1, 1.1])
self.axis.set_xlabel('time (ms)')
self.axis.set_ylabel('Baseband Signal')
self.canvas = FigureCanvas(self.figure) # a Gtk.DrawingArea
self.canvas1 = FigureCanvas(self.figure)
self.canvas2 = FigureCanvas(self.figure)
self.canvas.set_size_request(400,40)
self.canvas1.set_size_request(400,40)
self.canvas2.set_size_request(400,40)
# each canvas should be identical; assign to the widgets on 3 tabs
self.plotWidget = self.builder.get_object("basebandPlot")
self.plotWidget1 = self.builder.get_object("basebandPlot1")
self.plotWidget2 = self.builder.get_object("basebandPlot2")
self.plotWidget.add_with_viewport(self.canvas)
self.plotWidget1.add_with_viewport(self.canvas1)
self.plotWidget2.add_with_viewport(self.canvas2)
self.plotWidget.show_all()
self.plotWidget1.show_all()
self.plotWidget2.show_all()
# setup for protocol dialog
self.protocolDialog = self.builder.get_object("protocolDialog")
self.protocolTreeView = self.builder.get_object("protocolTreeView")
self.protocolTreeSelection = self.builder.get_object("protocolTreeView_selection")
# now add cell renderers for each column of protocol dialog
renderer_ID = Gtk.CellRendererText()
column_ID = Gtk.TreeViewColumn("ID", renderer_ID, text=0)
self.protocolTreeView.append_column(column_ID)
renderer_type = Gtk.CellRendererText()
column_type = Gtk.TreeViewColumn("Type", renderer_type, text=1)
self.protocolTreeView.append_column(column_type)
renderer_make = Gtk.CellRendererText()
column_make = Gtk.TreeViewColumn("Make", renderer_make, text=2)
self.protocolTreeView.append_column(column_make)
renderer_model = Gtk.CellRendererText()
column_model = Gtk.TreeViewColumn("Model", renderer_model, text=3)
self.protocolTreeView.append_column(column_model)
renderer_year = Gtk.CellRendererText()
column_year = Gtk.TreeViewColumn("Year", renderer_year, text=4)
self.protocolTreeView.append_column(column_year)
renderer_mod = Gtk.CellRendererText()
column_mod = Gtk.TreeViewColumn("Modulation", renderer_mod, text=5)
self.protocolTreeView.append_column(column_mod)
renderer_freq = Gtk.CellRendererText()
column_freq = Gtk.TreeViewColumn("Frequency", renderer_freq, text=6)
self.protocolTreeView.append_column(column_freq)
self.window = self.builder.get_object("window1")
self.aboutdialog = self.builder.get_object("aboutdialog1")
#self.userGuideWindow = self.builder.get_object("userGuideWindow")
self.protocolSaveAsDialog = self.builder.get_object("protocolSaveAsDialog")
#self.statusbar = self.builder.get_object("statusbar1")
self.window.unmaximize() # doesn't seem to work
self.window.show()
# if we were passed a protocol via the command line or via
# manual definition, populate gui with those values
#if not (protocol_number == -1):
self.populateProtocolToGui(protocol)
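# Illustrative note (not part of the original module): this class is meant to
# be constructed by a separate launcher script, roughly along the lines of
#   top = TopWindow(wcv.protocol)
#   Gtk.main()
# the actual entry point lives elsewhere in the repository.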
| mit |
fbagirov/scikit-learn | sklearn/preprocessing/tests/test_label.py | 156 | 17626 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
            # With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
        # With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
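            # Sparse output is only supported with a non-zero pos_label and a
            # zero neg_label; any other combination must raise ValueError.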
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
casawa/mdtraj | mdtraj/formats/mol2.py | 3 | 7865 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon, John D. Chodera
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit, copyright (c) 2012 Stanford University and Peter Eastman. Those
# portions are distributed under the following terms:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
"""Load an md.Topology from tripos mol2 files.
"""
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import numpy as np
import itertools
import re
from mdtraj.utils import import_
from mdtraj.utils.six.moves import cStringIO as StringIO
from mdtraj.formats.registry import _FormatRegistry
__all__ = ['load_mol2', "mol2_to_dataframes"]
@_FormatRegistry.register_loader('.mol2')
def load_mol2(filename):
"""Load a TRIPOS mol2 file from disk.
Parameters
----------
filename : str
        Path to the mol2 file on disk.
Returns
-------
traj : md.Trajectory
        The resulting trajectory, as an md.Trajectory object.
Notes
-----
This function should work on GAFF and sybyl style MOL2 files, but has
been primarily tested on GAFF mol2 files.
This function does NOT accept multi-structure MOL2 files!!!
The elements are guessed using GAFF atom types or via the atype string.
Examples
--------
>>> traj = md.load_mol2('mysystem.mol2')
"""
from mdtraj.core.trajectory import Trajectory
from mdtraj.core.topology import Topology
atoms, bonds = mol2_to_dataframes(filename)
atoms_mdtraj = atoms[["name", "resName"]].copy()
atoms_mdtraj["serial"] = atoms.index
#Figure out 1 letter element names
# IF this is a GAFF mol2, this line should work without issues
atoms_mdtraj["element"] = atoms.atype.map(gaff_elements)
# If this is a sybyl mol2, there should be NAN (null) values
if atoms_mdtraj.element.isnull().any():
# If this is a sybyl mol2, I think this works generally.
atoms_mdtraj["element"] = atoms.atype.apply(lambda x: x.strip(".")[0])
atoms_mdtraj["resSeq"] = np.ones(len(atoms), 'int')
atoms_mdtraj["chainID"] = np.ones(len(atoms), 'int')
bonds_mdtraj = bonds[["id0", "id1"]].values
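    # TRIPOS mol2 atom IDs are 1-based; shifting by the smallest observed bond
    # index converts the bond table to the 0-based numbering used by MDTraj.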
offset = bonds_mdtraj.min() # Should this just be 1???
bonds_mdtraj -= offset
top = Topology.from_dataframe(atoms_mdtraj, bonds_mdtraj)
xyzlist = np.array([atoms[["x", "y", "z"]].values])
xyzlist /= 10.0 # Convert from angstrom to nanometer
traj = Trajectory(xyzlist, top)
return traj
def mol2_to_dataframes(filename):
"""Convert a GAFF (or sybyl) mol2 file to a pair of pandas dataframes.
Parameters
----------
filename : str
Name of mol2 filename
Returns
-------
atoms_frame : pd.DataFrame
DataFrame containing atom information
bonds_frame : pd.DataFrame
DataFrame containing bond information
Notes
-----
These dataframes may contain force field information as well as the
information necessary for constructing the coordinates and molecular
topology. This function has been tested for GAFF and sybyl-style
mol2 files but has been primarily tested on GAFF mol2 files.
This function does NOT accept multi-structure MOL2 files!!!
See Also
--------
If you just need the coordinates and bonds, use load_mol2(filename)
to get a Trajectory object.
"""
pd = import_('pandas')
with open(filename) as f:
data = dict((key, list(grp)) for key, grp in itertools.groupby(f, _parse_mol2_sections))
# Mol2 can have "status bits" at the end of the bond lines. We don't care
# about these, but they interfere with using pd_read_table because it looks
# like one line has too many columns. So we just regex out the offending
# text.
status_bit_regex = "BACKBONE|DICT|INTERRES|\|"
data["@<TRIPOS>BOND\n"] = [re.sub(status_bit_regex, lambda _: "", s)
for s in data["@<TRIPOS>BOND\n"]]
csv = StringIO()
csv.writelines(data["@<TRIPOS>BOND\n"][1:])
csv.seek(0)
bonds_frame = pd.read_table(csv, names=["bond_id", "id0", "id1", "bond_type"],
index_col=0, header=None, sep="\s*", engine='python')
csv = StringIO()
csv.writelines(data["@<TRIPOS>ATOM\n"][1:])
csv.seek(0)
atoms_frame = pd.read_csv(csv, sep="\s*", engine='python', header=None,
names=["serial", "name", "x", "y", "z",
"atype", "code", "resName", "charge"])
return atoms_frame, bonds_frame
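# A minimal usage sketch (assuming "ligand.mol2" is a single-structure,
# GAFF-style file on disk):
#
#     atoms, bonds = mol2_to_dataframes("ligand.mol2")
#     print(atoms[["name", "x", "y", "z"]].head())
#     print(bonds[["id0", "id1", "bond_type"]].head())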
def _parse_mol2_sections(x):
"""Helper function for parsing a section in a MOL2 file."""
if x.startswith('@<TRIPOS>'):
_parse_mol2_sections.key = x
return _parse_mol2_sections.key
gaff_elements = {
'br': 'Br',
'c': 'C',
'c1': 'C',
'c2': 'C',
'c3': 'C',
'ca': 'C',
'cc': 'C',
'cd': 'C',
'ce': 'C',
'cf': 'C',
'cg': 'C',
'ch': 'C',
'cl': 'Cl',
'cp': 'C',
'cq': 'C',
'cu': 'C',
'cv': 'C',
'cx': 'C',
'cy': 'C',
'cz': 'C',
'f': 'F',
'h1': 'H',
'h2': 'H',
'h3': 'H',
'h4': 'H',
'h5': 'H',
'ha': 'H',
'hc': 'H',
'hn': 'H',
'ho': 'H',
'hp': 'H',
'hs': 'H',
'hw': 'H',
'hx': 'H',
'i': 'I',
'n': 'N',
'n1': 'N',
'n2': 'N',
'n3': 'N',
'n4': 'N',
'na': 'N',
'nb': 'N',
'nc': 'N',
'nd': 'N',
'ne': 'N',
'nf': 'N',
'nh': 'N',
'no': 'N',
'o': 'O',
'oh': 'O',
'os': 'O',
'ow': 'O',
'p2': 'P',
'p3': 'P',
'p4': 'P',
'p5': 'P',
'pb': 'P',
'px': 'P',
'py': 'P',
's': 'S',
's2': 'S',
's4': 'S',
's6': 'S',
'sh': 'S',
'ss': 'S',
'sx': 'S',
'sy': 'S'}
| lgpl-2.1 |
barentsen/dave | diffimg/test_tessprf.py | 1 | 4771 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 23:11:29 2018
@author: fergal
"""
from __future__ import print_function
from __future__ import division
from pdb import set_trace as debug
import matplotlib.pyplot as plt
#import matplotlib.patches as mpatch
#import matplotlib as mpl
#import pandas as pd
import numpy as np
import pytest
import tessprf as prf
datapath = "/home/fergal/data/tess/prf/"
def test_bracketing():
"""The method getRegularlySampledBracketingPrfs() has some fairly complicated
    bookkeeping to find the locations of the 4 prfs that bracket the input col,row
in 2d space. It internally checks this bookkeeping is correct and raises
an assert on failure. This test exercises all 4 paths in the code.
"""
obj = prf.TessPrf(datapath)
    #This input previously triggered the internal bookkeeping assert (regression check)
obj.getPrfAtColRow(1587, 1710, 1, 1, 1)
obj.getPrfAtColRow(1581, 1537, 1, 1, 1) #A
obj.getPrfAtColRow(1579, 1537, 1, 1, 1) #S
obj.getPrfAtColRow(1579, 1535, 1, 1, 1) #T
obj.getPrfAtColRow(1581, 1535, 1, 1, 1) #Cs
#Test out-of-bounds behaviour
def test_outOfBounds():
obj = prf.TessPrf(datapath)
with pytest.raises(ValueError):
obj.getPrfAtColRow(0,0, 1, 1, 1)
with pytest.raises(ValueError):
obj.getPrfAtColRow(44,1, 1, 1, 1)
with pytest.raises(ValueError):
obj.getPrfAtColRow(2093,1, 1, 1, 1)
with pytest.raises(ValueError):
obj.getPrfAtColRow(47, 0, 1, 1, 1)
with pytest.raises(ValueError):
obj.getPrfAtColRow(47,2048 , 1, 1, 1)
#Check some in bounds
obj.getPrfAtColRow(45, 1, 1, 1, 1)
obj.getPrfAtColRow(2091, 2047, 1, 1, 1)
def testIntFloatBug():
"""getPrfAtColRow() should return same value whether input is int or float"""
obj = prf.TessPrf(datapath)
img1 = obj.getPrfAtColRow(123, 456, 1,1,1)
img2 = obj.getPrfAtColRow(123.0, 456.0, 1,1,1)
assert np.all(img1 - img2 == 0)
def imgByOffset():
ccd, camera, sector = 1,1,1
col, row = 123, 456
obj = prf.TessPrf(datapath)
prfObjArray = obj.readPrfFile(ccd, camera, sector)
singlePrfObj = prfObjArray[0]
img0 = obj.getRegularlySampledPrfByOffset(singlePrfObj, 0, 0)
for offset in range(9):
# img1 = obj.getRegularlySampledPrfByOffset(singlePrfObj, offset, 0)
img1 = obj.getRegularlySampledPrfByOffset(singlePrfObj, offset, 0)
delta = img1 - img0
kwargs = {'origin':'bottom', 'interpolation':'nearest', 'cmap':plt.cm.YlGnBu_r}
plt.clf()
plt.subplot(121)
plt.imshow(img1, **kwargs)
plt.colorbar()
plt.subplot(122)
kwargs['cmap'] = plt.cm.PiYG
plt.imshow(delta, **kwargs)
vm = max( np.fabs( [np.min(delta), np.max(delta)] ))
# vm = 1e-2
plt.clim(-vm, vm)
plt.colorbar()
plt.suptitle(offset)
plt.pause(.1)
raw_input()
def testColFrac():
"""Test that changing column fraction moves flux around"""
obj = prf.TessPrf(datapath)
img1 = obj.getPrfAtColRow(123.0, 456, 1,1,1)
for frac in np.linspace(0, .9, 11):
print("Frac is %g" %(frac))
img2 = obj.getPrfAtColRow(123.0 + frac, 456.0, 1,1,1)
delta = img2 - img1
# prfPlot(img1, delta)
#For TESS, PRFs are 13x13. Check the flux near the centre
#is moving from lower columns to higher ones
assert delta[6,6] >= 0, delta[6,6]
assert delta[6,7] >= 0, delta[6,7]
assert delta[6,5] <= 0, delta[6,5]
def testRowFrac():
"""Test that changing column fraction moves flux around"""
obj = prf.TessPrf(datapath)
img1 = obj.getPrfAtColRow(123.0, 456, 1,1,1)
for frac in np.linspace(0, .9, 11):
img2 = obj.getPrfAtColRow(123.0, 456.0 + frac, 1,1,1)
delta = img2 - img1
# prfPlot(img1, delta)
#For TESS, PRFs are 13x13. Check the flux near the centre
        #is moving from lower rows to higher ones
assert delta[6,6] >= 0, delta[6,6]
assert delta[7,6] >= 0, delta[7,6]
assert delta[5,6] <= 0, delta[5,6]
def prfPlot(refImg, delta):
kwargs = {'origin':'bottom', 'interpolation':'nearest', 'cmap':plt.cm.YlGnBu_r}
plt.clf()
plt.subplot(121)
plt.imshow(refImg, **kwargs)
plt.colorbar()
plt.subplot(122)
kwargs['cmap'] = plt.cm.PiYG
plt.imshow(delta, **kwargs)
vm = max( np.fabs( [np.min(delta), np.max(delta)] ))
# vm = 1e-2
plt.clim(-vm, vm)
plt.colorbar()
plt.pause(.1)
| mit |
fenglu-g/incubator-airflow | tests/hooks/test_hive_hook.py | 1 | 20325 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import itertools
import os
import random
import unittest
from collections import OrderedDict
import mock
import pandas as pd
from hmsclient import HMSClient
from airflow import DAG, configuration
from airflow.exceptions import AirflowException
from airflow.hooks.hive_hooks import HiveCliHook, HiveMetastoreHook, HiveServer2Hook
from airflow.operators.hive_operator import HiveOperator
from airflow.utils import timezone
from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING
from airflow.utils.tests import assertEqualIgnoreMultipleSpaces
configuration.load_test_config()
DEFAULT_DATE = timezone.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
class HiveEnvironmentTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG('test_dag_id', default_args=args)
self.next_day = (DEFAULT_DATE +
datetime.timedelta(days=1)).isoformat()[:10]
self.database = 'airflow'
self.partition_by = 'ds'
self.table = 'static_babynames_partitioned'
self.hql = """
CREATE DATABASE IF NOT EXISTS {{ params.database }};
USE {{ params.database }};
DROP TABLE IF EXISTS {{ params.table }};
CREATE TABLE IF NOT EXISTS {{ params.table }} (
state string,
year string,
name string,
gender string,
num int)
PARTITIONED BY ({{ params.partition_by }} string);
ALTER TABLE {{ params.table }}
ADD PARTITION({{ params.partition_by }}='{{ ds }}');
"""
self.hook = HiveMetastoreHook()
t = HiveOperator(
task_id='HiveHook_' + str(random.randint(1, 10000)),
params={
'database': self.database,
'table': self.table,
'partition_by': self.partition_by
},
hive_cli_conn_id='beeline_default',
hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def tearDown(self):
hook = HiveMetastoreHook()
with hook.get_conn() as metastore:
metastore.drop_table(self.database, self.table, deleteData=True)
class TestHiveCliHook(unittest.TestCase):
def test_run_cli(self):
hook = HiveCliHook()
hook.run_cli("SHOW DATABASES")
def test_run_cli_with_hive_conf(self):
hql = "set key;\n" \
"set airflow.ctx.dag_id;\nset airflow.ctx.dag_run_id;\n" \
"set airflow.ctx.task_id;\nset airflow.ctx.execution_date;\n"
dag_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID']['env_var_format']
task_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID']['env_var_format']
execution_date_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][
'env_var_format']
dag_run_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][
'env_var_format']
os.environ[dag_id_ctx_var_name] = 'test_dag_id'
os.environ[task_id_ctx_var_name] = 'test_task_id'
os.environ[execution_date_ctx_var_name] = 'test_execution_date'
os.environ[dag_run_id_ctx_var_name] = 'test_dag_run_id'
hook = HiveCliHook()
output = hook.run_cli(hql=hql, hive_conf={'key': 'value'})
self.assertIn('value', output)
self.assertIn('test_dag_id', output)
self.assertIn('test_task_id', output)
self.assertIn('test_execution_date', output)
self.assertIn('test_dag_run_id', output)
del os.environ[dag_id_ctx_var_name]
del os.environ[task_id_ctx_var_name]
del os.environ[execution_date_ctx_var_name]
del os.environ[dag_run_id_ctx_var_name]
@mock.patch('airflow.hooks.hive_hooks.HiveCliHook.run_cli')
def test_load_file(self, mock_run_cli):
filepath = "/path/to/input/file"
table = "output_table"
hook = HiveCliHook()
hook.load_file(filepath=filepath, table=table, create=False)
query = (
"LOAD DATA LOCAL INPATH '{filepath}' "
"OVERWRITE INTO TABLE {table} ;\n"
.format(filepath=filepath, table=table)
)
mock_run_cli.assert_called_with(query)
@mock.patch('airflow.hooks.hive_hooks.HiveCliHook.load_file')
@mock.patch('pandas.DataFrame.to_csv')
def test_load_df(self, mock_to_csv, mock_load_file):
df = pd.DataFrame({"c": ["foo", "bar", "baz"]})
table = "t"
delimiter = ","
encoding = "utf-8"
hook = HiveCliHook()
hook.load_df(df=df,
table=table,
delimiter=delimiter,
encoding=encoding)
mock_to_csv.assert_called_once()
kwargs = mock_to_csv.call_args[1]
self.assertEqual(kwargs["header"], False)
self.assertEqual(kwargs["index"], False)
self.assertEqual(kwargs["sep"], delimiter)
mock_load_file.assert_called_once()
kwargs = mock_load_file.call_args[1]
self.assertEqual(kwargs["delimiter"], delimiter)
self.assertEqual(kwargs["field_dict"], {"c": u"STRING"})
self.assertTrue(isinstance(kwargs["field_dict"], OrderedDict))
self.assertEqual(kwargs["table"], table)
@mock.patch('airflow.hooks.hive_hooks.HiveCliHook.load_file')
@mock.patch('pandas.DataFrame.to_csv')
def test_load_df_with_optional_parameters(self, mock_to_csv, mock_load_file):
hook = HiveCliHook()
b = (True, False)
for create, recreate in itertools.product(b, b):
mock_load_file.reset_mock()
hook.load_df(df=pd.DataFrame({"c": range(0, 10)}),
table="t",
create=create,
recreate=recreate)
mock_load_file.assert_called_once()
kwargs = mock_load_file.call_args[1]
self.assertEqual(kwargs["create"], create)
self.assertEqual(kwargs["recreate"], recreate)
@mock.patch('airflow.hooks.hive_hooks.HiveCliHook.run_cli')
def test_load_df_with_data_types(self, mock_run_cli):
d = OrderedDict()
d['b'] = [True]
d['i'] = [-1]
d['t'] = [1]
d['f'] = [0.0]
d['c'] = ['c']
d['M'] = [datetime.datetime(2018, 1, 1)]
d['O'] = [object()]
d['S'] = ['STRING'.encode('utf-8')]
d['U'] = ['STRING']
d['V'] = [None]
df = pd.DataFrame(d)
hook = HiveCliHook()
hook.load_df(df, 't')
query = """
CREATE TABLE IF NOT EXISTS t (
b BOOLEAN,
i BIGINT,
t BIGINT,
f DOUBLE,
c STRING,
M TIMESTAMP,
O STRING,
S STRING,
U STRING,
V STRING)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
STORED AS textfile
;
"""
assertEqualIgnoreMultipleSpaces(self, mock_run_cli.call_args_list[0][0][0], query)
class TestHiveMetastoreHook(HiveEnvironmentTest):
VALID_FILTER_MAP = {'key2': 'value2'}
def test_get_max_partition_from_empty_part_specs(self):
max_partition = \
HiveMetastoreHook._get_max_partition_from_part_specs([],
'key1',
self.VALID_FILTER_MAP)
self.assertIsNone(max_partition)
def test_get_max_partition_from_valid_part_specs_and_invalid_filter_map(self):
with self.assertRaises(AirflowException):
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
'key1',
{'key3': 'value5'})
def test_get_max_partition_from_valid_part_specs_and_invalid_partition_key(self):
with self.assertRaises(AirflowException):
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
'key3',
self.VALID_FILTER_MAP)
def test_get_max_partition_from_valid_part_specs_and_none_partition_key(self):
with self.assertRaises(AirflowException):
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
None,
self.VALID_FILTER_MAP)
def test_get_max_partition_from_valid_part_specs_and_none_filter_map(self):
max_partition = \
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
'key1',
None)
# No partition will be filtered out.
self.assertEqual(max_partition, b'value3')
def test_get_max_partition_from_valid_part_specs(self):
max_partition = \
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
'key1',
self.VALID_FILTER_MAP)
self.assertEqual(max_partition, b'value1')
def test_get_metastore_client(self):
self.assertIsInstance(self.hook.get_metastore_client(), HMSClient)
def test_get_conn(self):
self.assertIsInstance(self.hook.get_conn(), HMSClient)
def test_check_for_partition(self):
partition = "{p_by}='{date}'".format(date=DEFAULT_DATE_DS,
p_by=self.partition_by)
missing_partition = "{p_by}='{date}'".format(date=self.next_day,
p_by=self.partition_by)
self.assertTrue(
self.hook.check_for_partition(self.database, self.table,
partition)
)
self.assertFalse(
self.hook.check_for_partition(self.database, self.table,
missing_partition)
)
def test_check_for_named_partition(self):
partition = "{p_by}={date}".format(date=DEFAULT_DATE_DS,
p_by=self.partition_by)
missing_partition = "{p_by}={date}".format(date=self.next_day,
p_by=self.partition_by)
self.assertTrue(
self.hook.check_for_named_partition(self.database,
self.table,
partition)
)
self.assertFalse(
self.hook.check_for_named_partition(self.database,
self.table,
missing_partition)
)
def test_get_table(self):
table_info = self.hook.get_table(db=self.database,
table_name=self.table)
self.assertEqual(table_info.tableName, self.table)
columns = ['state', 'year', 'name', 'gender', 'num']
self.assertEqual([col.name for col in table_info.sd.cols], columns)
def test_get_tables(self):
tables = self.hook.get_tables(db=self.database,
pattern=self.table + "*")
self.assertIn(self.table, {table.tableName for table in tables})
def test_get_databases(self):
databases = self.hook.get_databases(pattern='*')
self.assertIn(self.database, databases)
def test_get_partitions(self):
partitions = self.hook.get_partitions(schema=self.database,
table_name=self.table)
self.assertEqual(len(partitions), 1)
self.assertEqual(partitions, [{self.partition_by: DEFAULT_DATE_DS}])
def test_max_partition(self):
filter_map = {self.partition_by: DEFAULT_DATE_DS}
partition = self.hook.max_partition(schema=self.database,
table_name=self.table,
field=self.partition_by,
filter_map=filter_map)
self.assertEqual(partition, DEFAULT_DATE_DS.encode('utf-8'))
def test_table_exists(self):
self.assertTrue(self.hook.table_exists(self.table, db=self.database))
self.assertFalse(
self.hook.table_exists(str(random.randint(1, 10000)))
)
class TestHiveServer2Hook(unittest.TestCase):
def _upload_dataframe(self):
df = pd.DataFrame({'a': [1, 2], 'b': [1, 2]})
self.local_path = '/tmp/TestHiveServer2Hook.csv'
df.to_csv(self.local_path, header=False, index=False)
def setUp(self):
configuration.load_test_config()
self._upload_dataframe()
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG('test_dag_id', default_args=args)
self.database = 'airflow'
self.table = 'hive_server_hook'
self.hql = """
CREATE DATABASE IF NOT EXISTS {{ params.database }};
USE {{ params.database }};
DROP TABLE IF EXISTS {{ params.table }};
CREATE TABLE IF NOT EXISTS {{ params.table }} (
a int,
b int)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ',';
LOAD DATA LOCAL INPATH '{{ params.csv_path }}'
OVERWRITE INTO TABLE {{ params.table }};
"""
self.columns = ['{}.a'.format(self.table),
'{}.b'.format(self.table)]
self.hook = HiveMetastoreHook()
t = HiveOperator(
task_id='HiveHook_' + str(random.randint(1, 10000)),
params={
'database': self.database,
'table': self.table,
'csv_path': self.local_path
},
hive_cli_conn_id='beeline_default',
hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def tearDown(self):
hook = HiveMetastoreHook()
with hook.get_conn() as metastore:
metastore.drop_table(self.database, self.table, deleteData=True)
os.remove(self.local_path)
def test_get_conn(self):
hook = HiveServer2Hook()
hook.get_conn()
@mock.patch('pyhive.hive.connect')
def test_get_conn_with_password(self, mock_connect):
from airflow.hooks.base_hook import CONN_ENV_PREFIX
conn_id = "conn_with_password"
conn_env = CONN_ENV_PREFIX + conn_id.upper()
conn_value = os.environ.get(conn_env)
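        # Airflow resolves connections from AIRFLOW_CONN_<CONN_ID> environment
        # variables; the URI below encodes the host, credentials and the LDAP
        # auth mechanism that the hook is expected to pass through to pyhive.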
os.environ[conn_env] = "jdbc+hive2://conn_id:conn_pass@localhost:10000/default?authMechanism=LDAP"
HiveServer2Hook(hiveserver2_conn_id=conn_id).get_conn()
mock_connect.assert_called_with(
host='localhost',
port=10000,
auth='LDAP',
kerberos_service_name=None,
username='conn_id',
password='conn_pass',
database='default')
if conn_value:
os.environ[conn_env] = conn_value
def test_get_records(self):
hook = HiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
results = hook.get_records(query, schema=self.database)
self.assertListEqual(results, [(1, 1), (2, 2)])
def test_get_pandas_df(self):
hook = HiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
df = hook.get_pandas_df(query, schema=self.database)
self.assertEqual(len(df), 2)
self.assertListEqual(df.columns.tolist(), self.columns)
self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2])
def test_get_results_header(self):
hook = HiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
results = hook.get_results(query, schema=self.database)
self.assertListEqual([col[0] for col in results['header']],
self.columns)
def test_get_results_data(self):
hook = HiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
results = hook.get_results(query, schema=self.database)
self.assertListEqual(results['data'], [(1, 1), (2, 2)])
def test_to_csv(self):
hook = HiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
csv_filepath = 'query_results.csv'
hook.to_csv(query, csv_filepath, schema=self.database,
delimiter=',', lineterminator='\n', output_header=True)
df = pd.read_csv(csv_filepath, sep=',')
self.assertListEqual(df.columns.tolist(), self.columns)
self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2])
self.assertEqual(len(df), 2)
def test_multi_statements(self):
sqls = [
"CREATE TABLE IF NOT EXISTS test_multi_statements (i INT)",
"SELECT * FROM {}".format(self.table),
"DROP TABLE test_multi_statements",
]
hook = HiveServer2Hook()
results = hook.get_records(sqls, schema=self.database)
self.assertListEqual(results, [(1, 1), (2, 2)])
def test_get_results_with_hive_conf(self):
hql = ["set key",
"set airflow.ctx.dag_id",
"set airflow.ctx.dag_run_id",
"set airflow.ctx.task_id",
"set airflow.ctx.execution_date"]
dag_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID']['env_var_format']
task_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID']['env_var_format']
execution_date_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][
'env_var_format']
dag_run_id_ctx_var_name = \
AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][
'env_var_format']
os.environ[dag_id_ctx_var_name] = 'test_dag_id'
os.environ[task_id_ctx_var_name] = 'test_task_id'
os.environ[execution_date_ctx_var_name] = 'test_execution_date'
os.environ[dag_run_id_ctx_var_name] = 'test_dag_run_id'
hook = HiveServer2Hook()
output = '\n'.join(res_tuple[0]
for res_tuple
in hook.get_results(hql=hql,
hive_conf={'key': 'value'})['data'])
self.assertIn('value', output)
self.assertIn('test_dag_id', output)
self.assertIn('test_task_id', output)
self.assertIn('test_execution_date', output)
self.assertIn('test_dag_run_id', output)
del os.environ[dag_id_ctx_var_name]
del os.environ[task_id_ctx_var_name]
del os.environ[execution_date_ctx_var_name]
del os.environ[dag_run_id_ctx_var_name]
| apache-2.0 |
M4573R/BuildingMachineLearningSystemsWithPython | ch03/plot_kmeans_example.py | 24 | 4115 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
# inspired by http://scikit-learn.org/dev/auto_examples/cluster/plot_kmeans_digits.html#example-cluster-plot-kmeans-digits-py
import os
import scipy as sp
from scipy.stats import norm
from matplotlib import pylab
from sklearn.cluster import KMeans
from utils import CHART_DIR
seed = 2
sp.random.seed(seed) # to reproduce the data later on
num_clusters = 3
def plot_clustering(x, y, title, mx=None, ymax=None, xmin=None, km=None):
pylab.figure(num=None, figsize=(8, 6))
if km:
pylab.scatter(x, y, s=50, c=km.predict(list(zip(x, y))))
else:
pylab.scatter(x, y, s=50)
pylab.title(title)
pylab.xlabel("Occurrence word 1")
pylab.ylabel("Occurrence word 2")
pylab.autoscale(tight=True)
pylab.ylim(ymin=0, ymax=1)
pylab.xlim(xmin=0, xmax=1)
pylab.grid(True, linestyle='-', color='0.75')
return pylab
xw1 = norm(loc=0.3, scale=.15).rvs(20)
yw1 = norm(loc=0.3, scale=.15).rvs(20)
xw2 = norm(loc=0.7, scale=.15).rvs(20)
yw2 = norm(loc=0.7, scale=.15).rvs(20)
xw3 = norm(loc=0.2, scale=.15).rvs(20)
yw3 = norm(loc=0.8, scale=.15).rvs(20)
x = sp.append(sp.append(xw1, xw2), xw3)
y = sp.append(sp.append(yw1, yw2), yw3)
i = 1
plot_clustering(x, y, "Vectors")
pylab.savefig(os.path.join(CHART_DIR, "1400_03_0%i.png" % i))
pylab.clf()
i += 1
# 1 iteration ####################
mx, my = sp.meshgrid(sp.arange(0, 1, 0.001), sp.arange(0, 1, 0.001))
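# Dense grid covering the unit square; class predictions on this grid are
# used to paint the cluster regions behind each scatter plot.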
km = KMeans(init='random', n_clusters=num_clusters, verbose=1,
n_init=1, max_iter=1,
random_state=seed)
km.fit(sp.array(list(zip(x, y))))
Z = km.predict(sp.c_[mx.ravel(), my.ravel()]).reshape(mx.shape)
plot_clustering(x, y, "Clustering iteration 1", km=km)
pylab.imshow(Z, interpolation='nearest',
extent=(mx.min(), mx.max(), my.min(), my.max()),
cmap=pylab.cm.Blues,
aspect='auto', origin='lower')
c1a, c1b, c1c = km.cluster_centers_
pylab.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1],
marker='x', linewidth=2, s=100, color='black')
pylab.savefig(os.path.join(CHART_DIR, "1400_03_0%i.png" % i))
pylab.clf()
i += 1
# 2 iterations ####################
km = KMeans(init='random', n_clusters=num_clusters, verbose=1,
n_init=1, max_iter=2,
random_state=seed)
km.fit(sp.array(list(zip(x, y))))
Z = km.predict(sp.c_[mx.ravel(), my.ravel()]).reshape(mx.shape)
plot_clustering(x, y, "Clustering iteration 2", km=km)
pylab.imshow(Z, interpolation='nearest',
extent=(mx.min(), mx.max(), my.min(), my.max()),
cmap=pylab.cm.Blues,
aspect='auto', origin='lower')
c2a, c2b, c2c = km.cluster_centers_
pylab.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1],
marker='x', linewidth=2, s=100, color='black')
pylab.gca().add_patch(
pylab.Arrow(c1a[0], c1a[1], c2a[0] - c1a[0], c2a[1] - c1a[1], width=0.1))
pylab.gca().add_patch(
pylab.Arrow(c1b[0], c1b[1], c2b[0] - c1b[0], c2b[1] - c1b[1], width=0.1))
pylab.gca().add_patch(
pylab.Arrow(c1c[0], c1c[1], c2c[0] - c1c[0], c2c[1] - c1c[1], width=0.1))
pylab.savefig(os.path.join(CHART_DIR, "1400_03_0%i.png" % i))
pylab.clf()
i += 1
# 3 iterations ####################
km = KMeans(init='random', n_clusters=num_clusters, verbose=1,
n_init=1, max_iter=10,
random_state=seed)
km.fit(sp.array(list(zip(x, y))))
Z = km.predict(sp.c_[mx.ravel(), my.ravel()]).reshape(mx.shape)
plot_clustering(x, y, "Clustering iteration 10", km=km)
pylab.imshow(Z, interpolation='nearest',
extent=(mx.min(), mx.max(), my.min(), my.max()),
cmap=pylab.cm.Blues,
aspect='auto', origin='lower')
pylab.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1],
marker='x', linewidth=2, s=100, color='black')
pylab.savefig(os.path.join(CHART_DIR, "1400_03_0%i.png" % i))
pylab.clf()
i += 1
| mit |
HuimingCheng/AutoGrading | working_demo/main.py | 1 | 10438 | # -*- coding: utf-8 -*-
import cv2
import numpy as np
# import matplotlib.pyplot as plt
import math
try:
# for pycharm projet
from sample.database_grading_demo.Box import Box
from sample.database_grading_demo.AnswerSheet import AnswerSheet
from sample.database_grading_demo.helperFunction import getNameFromDatabse, updateScore
import sample.database_grading_demo.database as database
except:
# for non-pycharm user
from Box import Box
from AnswerSheet import AnswerSheet
from helperFunction import getNameFromDatabse, updateScore
import database as database
import os
import time
import sys
# import Tkinter
import tkinter
from importlib import reload
import mysql.connector
import sshtunnel
from mysql.connector.cursor import MySQLCursor
from pytesseract import image_to_string, image_to_boxes
from PIL import Image
from difflib import SequenceMatcher
from PIL import *
from PIL import ImageEnhance
import time
# begin of handwriting
#===========================================================================
def findcoordinateOfName(path):
image = cv2.imread(path)
height, width = image.shape[:2]
crop_img = image[ 0:int(height/3), 0:width]
cv2.imwrite("temp.png", crop_img)
image = Image.open("temp.png")
box = image_to_boxes(image).split('\n')
width , height = image.size
coordinate = []
for i in range(len(box)):
flag = False
if (box[i][0] == 'n' and box[i + 1][0] == 'a' and box[i + 2][0] == 'm' and box[i + 3][0] == 'e'):
for j in range(0, 4):
flag = True
coordinate.append(box[i+j])
if(flag):
break
coorE = coordinate[3].split(" ")
return (( int(coorE[1]) , height - int(coorE[4])), ( int(coorE[3]), height - int(coorE[2])))
def similar(a, b):
return SequenceMatcher(None, a.replace(" ", "").lower(), b.replace(" ", "").lower()).ratio()
# swap the position of first name and last name
def swap(name):
beforespace = ""
afterspace = ""
space = False
for letter in name:
if letter == ' ' or letter == ',':
space = True
elif space:
afterspace += letter
else:
beforespace += letter
return afterspace + " " + beforespace
def recog_name(image):
return image_to_string(image,
config="--psm 6 -c tessedit_char_whitelist=-ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz").strip()
def image_proc(image, n_pos):
# calculate bounds of interest
dh = n_pos[1][1] - n_pos[0][1]
upper = n_pos[0][1] - 2 * dh
lower = n_pos[1][1] + int(3.5 * dh)
left = n_pos[1][0]
right = left + 40 * (n_pos[1][0] - n_pos[0][0])
crop_img = image[ upper:lower, left:right]
cv2.imwrite("temp.png", crop_img)
time.sleep(10)
image = Image.open("temp.png")
return image
# end of handwriting
#============================================================
def gradeAnswer(correct_answer,answer):
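    # Compare the detected answers against the answer key (letters only) and
    # return one [question_number, given, expected] entry per mismatch, or
    # None when the two answer sequences differ in length.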
temp = ""
result = []
for letter in correct_answer:
        if letter.isalpha():
temp += letter
correct_answer = temp
if len(correct_answer) != len(answer):
return None
    for i in range(len(answer)):
        if answer[i] != correct_answer[i]:
            result.append([i + 1, answer[i], correct_answer[i]])
    return result
def printTime(timeBegin):
timeEnd = time.time()
# print("Time consuming is {:}".format(timeEnd-timeBegin))
return timeEnd
'''
@parameter: img_file: path of the scanned answer-sheet image to be graded
@parameter: answer_file: path of the text file containing the correct answers
'''
def grading(img_file, answer_file,recog = False):
# sshtunnel.SSH_TIMEOUT = 300.0
# sshtunnel.TUNNEL_TIMEOUT = 300.0
# with sshtunnel.SSHTunnelForwarder(
# ('ssh.pythonanywhere.com'),
# ssh_username='Gengruijie', ssh_password='Grj12345',
# remote_bind_address=('Gengruijie.mysql.pythonanywhere-services.com', 3306)
# ) as tunnel:
# connection = mysql.connector.connect(
# user='Gengruijie', password='GRJ12345',
# host='127.0.0.1', port=tunnel.local_bind_port,
# database='Gengruijie$AutoGrading',
# )
# query = "SELECT name from main"
# cursor = MySQLCursor(connection)
# cursor.execute(query)
# names = cursor.fetchall()
#
# print("Begin to grade answer sheet")
# temp = []
# for name in names:
# temp.append(name[0])
# names = temp
# myPath = os.path.dirname(os.path.realpath(__file__))
# myPath = os.path.split(myPath)[0] + "/web/static/upload/unclassify"
# myPathImage = image1
# myPathAnswer = myPath + "/answer.txt"
    image = cv2.imread(img_file)
answerFile = open(answer_file)
timeBegin = time.time()
# print("Begin to process the image")
# the paper is almost 3000*2000
centerOfPaper = (image.shape[0]/2, image.shape[1]/2)
# now the centre is (x,y)
centerOfPaper = (centerOfPaper[1],centerOfPaper[0])
answerSheet = AnswerSheet(centerOfPaper)
res = image
# convert image to grayscale
gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
# blur the image slightly to remove noise.
# gray = cv2.bilateralFilter(gray, 11, 17, 17)
gray = cv2.GaussianBlur(gray, (5, 5), 0) # is an alternative way to blur the image
# canny edge detection
edged = cv2.Canny(gray, 30, 200)
    # Two thresholding methods are applied:
    # the first is a plain global threshold,
    # the second uses adaptive Gaussian thresholding, which gives better results.
ret,thresh1 = cv2.threshold(gray,150,150,cv2.THRESH_BINARY)
thresh1=cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
# print("Finish to process the image")
# timeBegin = printTime(timeBegin)
answerSheet.setThreshold(thresh1)
# print("Begin to find the counters of the answer sheet")
try:
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
except:
(_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:200]
# print("Finish to find the counters of the answer sheet")
timeBegin = printTime(timeBegin)
# print("Begin to find counter around the centre of answer sheet")
listOfContourObject = []
for c in cnts:
listOfContourObject.append(Box(c))
distanceFromCentre = []
for box in listOfContourObject:
boxCentre = box.getCentre()
distance = math.sqrt((boxCentre[0]-centerOfPaper[0])**2 + (boxCentre[1]-centerOfPaper[1])**2 )
distanceFromCentre.append((distance,box,box.getArea()))
distanceFromCentre.sort()
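    # Contours closest to the centre of the sheet come first; they are used
    # below to estimate the typical answer-box area.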
# print("Finish to find counter around the centre of answer sheet")
timeBegin = printTime(timeBegin)
# print("To get the area of the answer box.")
# to get the area of the answer box.
answerSheet.findBoxArea(distanceFromCentre)
# print("Finish to get the area of the answer box.")
timeBegin = printTime(timeBegin)
# print("Begin to determine the box which is answer box")
# to determine the box which is answer box
answerSheet.findAnswerBox(listOfContourObject)
# print("Finish to determine the box which is answer box")
timeBegin = printTime(timeBegin)
# print("Begin to find length and height and difference between box")
# find length and height of the box
answerSheet.findLengthAndHeight()
answerSheet.findDistanceBetweenAnswerBoxAndNumberOfChoice()
# print("Finish to find length and height and difference between box")
timeBegin = printTime(timeBegin)
# print("Begin to locate the question")
answerSheet.locateQuestion()
# print("Finish to locate the question")
timeBegin = printTime(timeBegin)
# print("Begin to get the answer from sheet and file, and grade.")
# to get the answer from sheet and file, and grade.
answerFile = answerFile.read()
correctAnswer = answerFile.split("\n")
studentAnswer = answerSheet.getAnswer()
result = gradeAnswer(correctAnswer,studentAnswer)
score = str(len(studentAnswer)-len(result)) +"/" + str(len(studentAnswer))
# print("Finish to get the answer from sheet and file, and grade.")
timeBegin = printTime(timeBegin)
if recog == False:
return len(studentAnswer)-len(result)
# =======================================================================================
    # The next part is handwriting recognition.
    # This part is still under development.
# =======================================================================================
"""
print("Begin to recongnize the handwriting")
filename = myPathImage
n_pos = findcoordinateOfName(filename)
image = image_proc(image, n_pos)
# string = recogRuijie(image)
string = recog_name(image)
print("End to recongnize the handwriting")
timeBegin = printTime(timeBegin)
print("Begin to compare the name with database")
os.remove("temp.png")
m_sim = 0
m_name = ""
searchName = ""
names = getNameFromDatabse()
for name in names:
# get the name with maximum similarity
# we don't make any assumption of the order of first name and last name
# check both possibilityies
# we compare the higher of both with results from other strings
sim = max(similar(string, name), similar(string, swap(name)))
if sim > m_sim:
m_name = name
searchName = name
m_sim = sim
if (m_name != ""):
updateScore(score,searchName)
else:
print("Recognition Failed")
print("Finish to compare the name with database")
printTime(timeBegin)
"""
if __name__ == '__main__':
"""
For DEMO
"""
# answer sheet and answer
answer_file = "test_file/answer.txt"
image_file ="test_file/Answer_sheet.png"
# the result is the integer
result = grading(image_file, answer_file)
print("The result of the answer sheet is {}".format(result))
db = database.Database("Ruijie", "12345678", "142.93.59.116", "Student_grade")
print(db.describe_table("GRADE"))
db.insert_data(["hubert", result], table="GRADE")
print(db.queryData("GRADE"))
| mit |
anntzer/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 82 | 1671 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1], the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is, the third feature is
completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-tests statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
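# Both scores are rescaled to [0, 1] so they can be shown on a common scale
# in the subplot titles.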
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y, edgecolor='black', s=20)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
| bsd-3-clause |
clips/news-audit | SensationalismClassifier/SensationalismClassifier.py | 1 | 10603 | __author__ = "Masha Ivenskaya"
from argparse import ArgumentParser
import cPickle as pickle
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from collections import defaultdict
import string
import sys
from pattern.db import Datasheet
from pattern.db import pd
import nltk
from random import shuffle
from pattern.en import sentiment
from pattern.en.wordlist import PROFANITY
from time import strftime
from time import time
import logging
class ItemSelector(BaseEstimator, TransformerMixin):
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class Punct_Stats(BaseEstimator, TransformerMixin):
"""Extract punctuation features from each document"""
def fit(self, x, y=None):
return self
def transform(self, text_fields):
punct_stats = []
punctuations = list(string.punctuation)
additional_punc = ['``', '--', '\'\'']
punctuations.extend(additional_punc)
for field in text_fields:
puncts = defaultdict(int)
for ch in field:
if ch in punctuations:
puncts[ch]+=1
punct_stats.append(puncts)
return punct_stats
class Text_Stats(BaseEstimator, TransformerMixin):
"""Extract text statistics from each document"""
def fit(self, x, y=None):
return self
def transform(self, text_fields):
stats = []
punctuation = string.punctuation
abvs = ['CNN', 'FBI', 'ABC', 'MSNBC', 'GOP', 'U.S.', 'US', 'ISIS', 'DNC', 'TV', 'CIA', 'I', 'AP', 'PM', 'AM', 'EU', 'USA', 'UK', 'UN', 'CEO', 'NASA', 'LGBT', 'LGBTQ', 'NAFTA', 'ACLU']
for field in text_fields:
field_stats = {}
tok_text = nltk.word_tokenize(field)
try:
num_upper = float(len([w for w in tok_text if w.isupper() and w not in abvs]))/len(tok_text)
except:
num_upper = 0
try:
num_punct = float(len([ch for ch in field if ch in punctuation]))/len(field)
except:
num_punct = 0
try:
sent_lengths = [len(nltk.word_tokenize(s)) for s in nltk.sent_tokenize(field)]
av_sent_len = float(sum(sent_lengths))/len(sent_lengths)
except:
av_sent_len = 0
try:
num_prof = float(len([w for w in tok_text if w.lower() in PROFANITY]))/len(tok_text)
except:
num_prof = 0
polarity, subjectivity = sentiment(field)
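            # pattern.en's sentiment() returns (polarity, subjectivity), with
            # polarity in [-1, 1] and subjectivity in [0, 1].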
field_stats['all_caps'] = num_upper
field_stats['sent_len'] = av_sent_len
field_stats['polarity'] = polarity
field_stats['subjectivity'] = subjectivity
field_stats['profanity'] = num_prof
stats.append(field_stats)
return stats
class HeadlineBodyFeaturesExtractor(BaseEstimator, TransformerMixin):
"""Extracts the components of each input in the data: headline, body, and POS tags for each"""
def fit(self, x, y=None):
return self
def transform(self, posts):
punctuation = string.punctuation
features = np.recarray(shape=(len(posts),), dtype=[('headline', object), ('article_body', object), ('headline_pos', object), ('body_pos', object)])
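        # One record per post: the raw headline/body text plus space-joined
        # POS-tag strings, which the downstream CountVectorizers treat as
        # ordinary token streams.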
for i, post in enumerate(posts):
headline, article = post[:2]
features['headline'][i] = headline
features['article_body'][i] = article
tok_headline = nltk.word_tokenize(headline)
features['headline_pos'][i] = (' ').join([x[1] for x in nltk.pos_tag(tok_headline)])
tok_article = nltk.word_tokenize(article)
features['body_pos'][i] = (' ').join([x[1] for x in nltk.pos_tag(tok_article)])
return features
class SensationalismClassifier(object):
def __init__(self, model=None, train=True, train_data=None,
dump=False, debug=False):
"""Intialize classifier, either from pre-trained model or from scratch"""
self.debug = debug
if model:
try:
self.pipeline = self.load_model(model)
self.model_name = model
except Exception as e_load:
logging.critical(str(e_load))
self.classifier = None
else:
self.pipeline = self.train(train_data)
if dump:
self.dump_model()
def load_model(self, model_file=None):
""" Load model from pre-trained pickle"""
if self.debug:
logging.info("Loading model %s" % model_file)
try:
with open(model_file, "rb") as pkl:
pipeline = pickle.load(pkl)
except (IOError, pickle.UnpicklingError) as e:
logging.critical(str(e))
raise e
return pipeline
def dump_model(self, model_file="model_%s.pkl" % strftime("%Y%m%d_%H%M")):
""" Pickle trained model """
if self.debug:
logging.info("Dumping model to %s" % model_file)
with open(model_file, "wb") as f_pkl:
try:
pickle.dump(self.pipeline, f_pkl, pickle.HIGHEST_PROTOCOL)
self.model_name = model_file
except pickle.PicklingError as e_pkl:
print str(e_pkl) + ": continuing without dumping."
def train(self, train_path):
""" Train classifier on features from headline and article text """
if self.debug:
tick = time()
logging.info("Training new model with %s" % (train_path,))
logging.info("Loading/shuffling training data...")
train_data = Datasheet.load(train_path)
shuffle(train_data)
train_texts = zip(train_data.columns[0], train_data.columns[1])
train_labels = train_data.columns[-1]
pipeline = Pipeline([
# Extract the subject & body
('HeadlineBodyFeatures', HeadlineBodyFeaturesExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
#Pipeline for pulling features from articles
('punct_stats_headline', Pipeline([
('selector', ItemSelector(key='headline')),
('stats', Punct_Stats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
('punct_stats_body', Pipeline([
('selector', ItemSelector(key='article_body')),
('stats', Punct_Stats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
('pos_ngrams_headline', Pipeline([
('selector', ItemSelector(key='headline_pos')),
('vect', CountVectorizer(ngram_range=(1,2), token_pattern = r'\b\w+\b', max_df = 0.5)),
])),
('pos_ngrams_body', Pipeline([
('selector', ItemSelector(key='body_pos')),
('vect', CountVectorizer(ngram_range=(1,2), token_pattern = r'\b\w+\b', max_df = 0.5)),
])),
('text_stats_headline', Pipeline([
('selector', ItemSelector(key='headline')),
('stats', Text_Stats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
('text_stats_body', Pipeline([
('selector', ItemSelector(key='article_body')),
('stats', Text_Stats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
)),
# Use an SVC classifier on the combined features
('svc', SVC(C=1.0)),
])
if self.debug:
logging.info('Fitting training data')
pipeline.fit(train_texts, train_labels)
if self.debug:
logging.info("Done in %0.2fs" % (time() - tick,))
return pipeline
def classify(self, inputs):
""" Classifies inputs """
responses = []
results = self.pipeline.predict(inputs)
for i, line in enumerate(inputs):
line.append(results[i])
responses.append(line)
return responses
def main():
logging.basicConfig(level=logging.INFO)
argparser = ArgumentParser(description=__doc__)
argparser.add_argument("-t", "--trainset", action="store",
default=None,
help=("Path to training data "
"[default: %(default)s]"))
argparser.add_argument("-m", "--model", action="store",
help="Path to model")
argparser.add_argument("-d", "--dump", action="store_true",
help="Pickle trained model? [default: False]")
argparser.add_argument("-v", "--verbose", action="store_true",
default=False,
help="Verbose [default: quiet]")
argparser.add_argument("-c", "--classify", action="store",
default=None,
help=("Path to data to classify "
"[default: %(default)s]"))
argparser.add_argument("-s", "--save", action="store",
default='output.csv',
help=("Path to output file"
"[default = output.csv]"))
args = argparser.parse_args()
clf = SensationalismClassifier(train_data=args.trainset,
model=args.model,
dump=args.dump,
debug=args.verbose)
if args.classify:
OUTPUT_PATH = args.save
if clf.debug:
tick = time()
to_classify = Datasheet.load(args.classify)
classified_data = clf.classify(to_classify)
output = Datasheet(classified_data)
output.save(pd(OUTPUT_PATH))
if clf.debug:
sys.stderr.write("\nProcessed %d items in %0.2fs" %
(len(classified_data), time() - tick))
if __name__ == "__main__":
main() | gpl-3.0 |
kashif/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause |
soulmachine/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
openfisca/openfisca-matplotlib | openfisca_matplotlib/tests/test_dataframes.py | 1 | 2507 | # -*- coding: utf-8 -*-
from __future__ import division
import os
from openfisca_core.decompositions import get_decomposition_json
from openfisca_france import FranceTaxBenefitSystem
from openfisca_matplotlib.tests.test_graphs import create_simulation
from openfisca_matplotlib.dataframes import data_frame_from_decomposition_json
tax_benefit_system = FranceTaxBenefitSystem()
def test():
reform_simulation, reference_simulation = create_simulation()
data_frame = data_frame_from_decomposition_json(
reform_simulation,
decomposition_json = None,
reference_simulation = reference_simulation,
)
return data_frame
def test_bareme():
reform_simulation, reference_simulation = create_simulation(bareme = True)
data_frame = data_frame_from_decomposition_json(
reform_simulation,
decomposition_json = None,
reference_simulation = reference_simulation,
)
return data_frame
def test_remove_null():
reform_simulation, reference_simulation = create_simulation()
data_frame = data_frame_from_decomposition_json(
reform_simulation,
decomposition_json = None,
reference_simulation = reference_simulation,
remove_null = True)
return data_frame
def test_fiche_de_paie():
reform_simulation, reference_simulation = create_simulation()
xml_file_path = os.path.join(
os.path.dirname(tax_benefit_system.decomposition_file_path),
"fiche_de_paie_decomposition.xml"
)
decomposition_json = get_decomposition_json(tax_benefit_system, xml_file_path)
data_frame = data_frame_from_decomposition_json(
reform_simulation,
decomposition_json = decomposition_json,
reference_simulation = reference_simulation,
remove_null = True)
return data_frame
def test_fiche_de_paie_bareme(bareme=True):
reform_simulation, reference_simulation = create_simulation(bareme=bareme)
xml_file_path = os.path.join(
os.path.dirname(tax_benefit_system.decomposition_file_path),
"fiche_de_paie_decomposition.xml"
)
decomposition_json = get_decomposition_json(tax_benefit_system, xml_file_path)
data_frame = data_frame_from_decomposition_json(
reference_simulation,
decomposition_json = decomposition_json,
remove_null = True)
return data_frame
if __name__ == '__main__':
# test()
# df = test_remove_null()
df = test_fiche_de_paie_bareme()
print(df)
| agpl-3.0 |
tmhm/scikit-learn | examples/cluster/plot_color_quantization.py | 297 | 3443 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (values need to
# be in the range [0-1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
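# Hedged aside (added for illustration, not part of the original example): the
# nested loops above are easy to read but slow; NumPy fancy indexing gives an
# equivalent result in one expression. This sketch is not called by the script.
def recreate_image_fast(codebook, labels, w, h):
    """Vectorized equivalent of recreate_image using fancy indexing."""
    # codebook has shape (n_colors, d); labels has shape (w * h,)
    return codebook[labels].reshape(w, h, -1)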
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
mhdella/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 133 | 3517 | """
========================================================================================
Topics extraction with Non-Negative Matrix Factorization And Latent Dirichlet Allocation
========================================================================================
This is an example of applying Non-Negative Matrix Factorization
and Latent Dirichlet Allocation on a corpus of documents to
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a few tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
# use tf-idf feature for NMF model
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(data_samples)
# use tf feature for LDA model
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("\nFitting LDA models with tf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0)
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
| bsd-3-clause |
benanne/kaggle-galaxies | predict_augmented_npy_8433n_maxout2048.py | 7 | 9458 | """
Load an analysis file and redo the predictions on the validation set / test set,
this time with augmented data and averaging. Store them as numpy files.
"""
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
BATCH_SIZE = 32 # 16
NUM_INPUT_FEATURES = 3
CHUNK_SIZE = 8000 # 10000 # this should be a multiple of the batch size
# ANALYSIS_PATH = "analysis/try_convnet_cc_multirot_3x69r45_untied_bias.pkl"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_8433n_maxout2048.pkl"
DO_VALID = True # disable this to not bother with the validation set evaluation
DO_TEST = True # disable this to not generate predictions on the testset
target_filename = os.path.basename(ANALYSIS_PATH).replace(".pkl", ".npy.gz")
target_path_valid = os.path.join("predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join("predictions/final/augmented/test", target_filename)
print "Loading model data etc."
analysis = np.load(ANALYSIS_PATH)
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)]
num_input_representations = len(ds_transforms)
# split training data into training + a small validation set
num_train = load_data.num_train
num_valid = num_train // 10 # integer division
num_train -= num_valid
num_test = load_data.num_test
valid_ids = load_data.train_ids[num_train:]
train_ids = load_data.train_ids[:num_train]
test_ids = load_data.test_ids
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train+num_valid)
test_indices = np.arange(num_test)
y_valid = np.load("data/solutions_train.npy")[num_train:]
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=8, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=4, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
# l4 = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5)
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens)
print "Load model parameters"
layers.set_param_values(l6, analysis['param_values'])
print "Create generators"
# set here which transforms to use to make predictions
augmentation_transforms = []
for zoom in [1 / 1.2, 1.0, 1.2]:
for angle in np.linspace(0, 360, 10, endpoint=False):
augmentation_transforms.append(ra.build_augmentation_transform(rotation=angle, zoom=zoom))
augmentation_transforms.append(ra.build_augmentation_transform(rotation=(angle + 180), zoom=zoom, shear=180)) # flipped
print " %d augmentation transforms." % len(augmentation_transforms)
augmented_data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms)
valid_gen = load_data.buffered_gen_mp(augmented_data_gen_valid, buffer_size=1)
augmented_data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms)
test_gen = load_data.buffered_gen_mp(augmented_data_gen_test, buffer_size=1)
approx_num_chunks_valid = int(np.ceil(num_valid * len(augmentation_transforms) / float(CHUNK_SIZE)))
approx_num_chunks_test = int(np.ceil(num_test * len(augmentation_transforms) / float(CHUNK_SIZE)))
print "Approximately %d chunks for the validation set" % approx_num_chunks_valid
print "Approximately %d chunks for the test set" % approx_num_chunks_test
if DO_VALID:
print
print "VALIDATION SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(valid_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions; don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_valid
load_data.save_gz(target_path_valid, all_predictions)
print "Evaluate"
rmse_valid = analysis['losses_valid'][-1]
rmse_augmented = np.sqrt(np.mean((y_valid - all_predictions)**2))
print " MSE (last iteration):\t%.6f" % rmse_valid
print " MSE (augmented):\t%.6f" % rmse_augmented
if DO_TEST:
print
print "TEST SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(test_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions; don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_test
load_data.save_gz(target_path_test, all_predictions)
print "Done!"
| bsd-3-clause |
Janderson/analise_dados_ob | python/backtest.py | 1 | 1739 | from abc import ABCMeta, abstractmethod
class StrategyBO(object):
"""Strategy is an abstract base class providing an interface for
all subsequent (inherited) trading strategies.
The goal of a (derived) Strategy object is to output a list of signals,
which has the form of a time series indexed pandas DataFrame.
In this instance only a single symbol/instrument is supported."""
__metaclass__ = ABCMeta
@abstractmethod
def generate_signals(self):
"""An implementation is required to return the DataFrame of symbols
containing the signals to go long, short or hold (1, -1 or 0)."""
raise NotImplementedError("Should implement generate_signals()!")
class Portfolio(object):
"""An abstract base class representing a portfolio of
positions (including both instruments and cash), determined
on the basis of a set of signals provided by a Strategy."""
__metaclass__ = ABCMeta
@abstractmethod
def generate_positions(self):
"""Provides the logic to determine how the portfolio
positions are allocated on the basis of forecasting
signals and available cash."""
raise NotImplementedError("Should implement generate_positions()!")
@abstractmethod
def backtest_portfolio(self):
"""Provides the logic to generate the trading orders
and subsequent equity curve (i.e. growth of total equity),
as a sum of holdings and cash, and the bar-period returns
associated with this curve based on the 'positions' DataFrame.
Produces a portfolio object that can be examined by
other classes/functions."""
raise NotImplementedError("Should implement backtest_portfolio()!") | gpl-3.0 |
planetarymike/IDL-Colorbars | IDL_py_test/012_16_LEVEL.py | 1 | 5535 | from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[0., 0., 0.],
[0., 0.329412, 0.],
[0., 0.329412, 0.],
[0., 0.329412, 0.],
[0., 0.329412, 0.],
[0., 0.329412, 0.],
[0., 0.329412, 0.],
[0., 0.329412, 0.],
[0., 0.329412, 0.],
[0., 0.329412, 0.],
[0., 0.329412, 0.],
[0., 0.329412, 0.],
[0., 0.329412, 0.],
[0., 0.329412, 0.],
[0., 0.329412, 0.],
[0., 0.329412, 0.],
[0., 0.658824, 0.],
[0., 0.658824, 0.],
[0., 0.658824, 0.],
[0., 0.658824, 0.],
[0., 0.658824, 0.],
[0., 0.658824, 0.],
[0., 0.658824, 0.],
[0., 0.658824, 0.],
[0., 0.658824, 0.],
[0., 0.658824, 0.],
[0., 0.658824, 0.],
[0., 0.658824, 0.],
[0., 0.658824, 0.],
[0., 0.658824, 0.],
[0., 0.658824, 0.],
[0., 0.658824, 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.329412],
[0., 1., 0.329412],
[0., 1., 0.329412],
[0., 1., 0.329412],
[0., 1., 0.329412],
[0., 1., 0.329412],
[0., 1., 0.329412],
[0., 1., 0.329412],
[0., 1., 0.329412],
[0., 1., 0.329412],
[0., 1., 0.329412],
[0., 1., 0.329412],
[0., 1., 0.329412],
[0., 1., 0.329412],
[0., 1., 0.329412],
[0., 1., 0.329412],
[0., 1., 0.658824],
[0., 1., 0.658824],
[0., 1., 0.658824],
[0., 1., 0.658824],
[0., 1., 0.658824],
[0., 1., 0.658824],
[0., 1., 0.658824],
[0., 1., 0.658824],
[0., 1., 0.658824],
[0., 1., 0.658824],
[0., 1., 0.658824],
[0., 1., 0.658824],
[0., 1., 0.658824],
[0., 1., 0.658824],
[0., 1., 0.658824],
[0., 1., 0.658824],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 0., 1.],
[0., 0., 1.],
[0., 0., 1.],
[0., 0., 1.],
[0., 0., 1.],
[0., 0., 1.],
[0., 0., 1.],
[0., 0., 1.],
[0., 0., 1.],
[0., 0., 1.],
[0., 0., 1.],
[0., 0., 1.],
[0., 0., 1.],
[0., 0., 1.],
[0., 0., 1.],
[0., 0., 1.],
[0.501961, 0., 1.],
[0.501961, 0., 1.],
[0.501961, 0., 1.],
[0.501961, 0., 1.],
[0.501961, 0., 1.],
[0.501961, 0., 1.],
[0.501961, 0., 1.],
[0.501961, 0., 1.],
[0.501961, 0., 1.],
[0.501961, 0., 1.],
[0.501961, 0., 1.],
[0.501961, 0., 1.],
[0.501961, 0., 1.],
[0.501961, 0., 1.],
[0.501961, 0., 1.],
[0.501961, 0., 1.],
[1., 0., 0.862745],
[1., 0., 0.862745],
[1., 0., 0.862745],
[1., 0., 0.862745],
[1., 0., 0.862745],
[1., 0., 0.862745],
[1., 0., 0.862745],
[1., 0., 0.862745],
[1., 0., 0.862745],
[1., 0., 0.862745],
[1., 0., 0.862745],
[1., 0., 0.862745],
[1., 0., 0.862745],
[1., 0., 0.862745],
[1., 0., 0.862745],
[1., 0., 0.862745],
[1., 0., 0.705882],
[1., 0., 0.705882],
[1., 0., 0.705882],
[1., 0., 0.705882],
[1., 0., 0.705882],
[1., 0., 0.705882],
[1., 0., 0.705882],
[1., 0., 0.705882],
[1., 0., 0.705882],
[1., 0., 0.705882],
[1., 0., 0.705882],
[1., 0., 0.705882],
[1., 0., 0.705882],
[1., 0., 0.705882],
[1., 0., 0.705882],
[1., 0., 0.705882],
[1., 0., 0.501961],
[1., 0., 0.501961],
[1., 0., 0.501961],
[1., 0., 0.501961],
[1., 0., 0.501961],
[1., 0., 0.501961],
[1., 0., 0.501961],
[1., 0., 0.501961],
[1., 0., 0.501961],
[1., 0., 0.501961],
[1., 0., 0.501961],
[1., 0., 0.501961],
[1., 0., 0.501961],
[1., 0., 0.501961],
[1., 0., 0.501961],
[1., 0., 0.501961],
[1., 0., 0.25098],
[1., 0., 0.25098],
[1., 0., 0.25098],
[1., 0., 0.25098],
[1., 0., 0.25098],
[1., 0., 0.25098],
[1., 0., 0.25098],
[1., 0., 0.25098],
[1., 0., 0.25098],
[1., 0., 0.25098],
[1., 0., 0.25098],
[1., 0., 0.25098],
[1., 0., 0.25098],
[1., 0., 0.25098],
[1., 0., 0.25098],
[1., 0., 0.25098],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[0.862745, 0.745098, 0.745098],
[0.862745, 0.745098, 0.745098],
[0.862745, 0.745098, 0.745098],
[0.862745, 0.745098, 0.745098],
[0.862745, 0.745098, 0.745098],
[0.866667, 0.745098, 0.745098],
[0.866667, 0.745098, 0.745098],
[0.866667, 0.745098, 0.745098],
[0.866667, 0.745098, 0.745098],
[0.866667, 0.745098, 0.745098],
[0.870588, 0.745098, 0.745098],
[0.870588, 0.745098, 0.745098],
[0.870588, 0.745098, 0.745098],
[0.870588, 0.745098, 0.745098],
[0.870588, 0.745098, 0.745098],
[0.87451, 0.745098, 0.745098],
[0.862745, 0.862745, 0.862745],
[0.862745, 0.862745, 0.862745],
[0.862745, 0.862745, 0.862745],
[0.862745, 0.862745, 0.862745],
[0.862745, 0.862745, 0.862745],
[0.862745, 0.862745, 0.862745],
[0.862745, 0.862745, 0.862745],
[0.862745, 0.862745, 0.862745],
[0.862745, 0.862745, 0.862745],
[0.862745, 0.862745, 0.862745],
[0.862745, 0.862745, 0.862745],
[0.862745, 0.862745, 0.862745],
[0.862745, 0.862745, 0.862745],
[0.862745, 0.862745, 0.862745],
[0.862745, 0.862745, 0.862745],
[0.862745, 0.862745, 0.862745],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| gpl-2.0 |
AlirezaShahabi/zipline | zipline/finance/risk/period.py | 17 | 11952 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logbook
import math
import numpy as np
import numpy.linalg as la
from six import iteritems
from zipline.finance import trading
import pandas as pd
from . import risk
from . risk import (
alpha,
check_entry,
downside_risk,
information_ratio,
sharpe_ratio,
sortino_ratio,
)
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
log = logbook.Logger('Risk Period')
choose_treasury = functools.partial(risk.choose_treasury,
risk.select_treasury_duration)
class RiskMetricsPeriod(object):
def __init__(self, start_date, end_date, returns,
benchmark_returns=None,
algorithm_leverages=None):
treasury_curves = trading.environment.treasury_curves
if treasury_curves.index[-1] >= start_date:
mask = ((treasury_curves.index >= start_date) &
(treasury_curves.index <= end_date))
self.treasury_curves = treasury_curves[mask]
else:
# our test is beyond the treasury curve history
# so we'll use the last available treasury curve
self.treasury_curves = treasury_curves[-1:]
self.start_date = start_date
self.end_date = end_date
if benchmark_returns is None:
br = trading.environment.benchmark_returns
benchmark_returns = br[(br.index >= returns.index[0]) &
(br.index <= returns.index[-1])]
self.algorithm_returns = self.mask_returns_to_period(returns)
self.benchmark_returns = self.mask_returns_to_period(benchmark_returns)
self.algorithm_leverages = algorithm_leverages
self.calculate_metrics()
def calculate_metrics(self):
self.benchmark_period_returns = \
self.calculate_period_returns(self.benchmark_returns)
self.algorithm_period_returns = \
self.calculate_period_returns(self.algorithm_returns)
if not self.algorithm_returns.index.equals(
self.benchmark_returns.index
):
message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}"
message = message.format(
bm_count=len(self.benchmark_returns),
algo_count=len(self.algorithm_returns),
start=self.start_date,
end=self.end_date
)
raise Exception(message)
self.num_trading_days = len(self.benchmark_returns)
self.trading_day_counts = pd.stats.moments.rolling_count(
self.algorithm_returns, self.num_trading_days)
self.mean_algorithm_returns = pd.Series(
index=self.algorithm_returns.index)
for dt, ret in self.algorithm_returns.iteritems():
self.mean_algorithm_returns[dt] = (
self.algorithm_returns[:dt].sum() /
self.trading_day_counts[dt])
self.benchmark_volatility = self.calculate_volatility(
self.benchmark_returns)
self.algorithm_volatility = self.calculate_volatility(
self.algorithm_returns)
self.treasury_period_return = choose_treasury(
self.treasury_curves,
self.start_date,
self.end_date
)
self.sharpe = self.calculate_sharpe()
# The consumer currently expects a 0.0 value for sharpe in period,
# this differs from cumulative which was np.nan.
# When factoring out the sharpe_ratio, the different return types
# were collapsed into `np.nan`.
# TODO: Either fix consumer to accept `np.nan` or make the
# `sharpe_ratio` return type configurable.
# In the meantime, convert nan values to 0.0
if pd.isnull(self.sharpe):
self.sharpe = 0.0
self.sortino = self.calculate_sortino()
self.information = self.calculate_information()
self.beta, self.algorithm_covariance, self.benchmark_variance, \
self.condition_number, self.eigen_values = self.calculate_beta()
self.alpha = self.calculate_alpha()
self.excess_return = self.algorithm_period_returns - \
self.treasury_period_return
self.max_drawdown = self.calculate_max_drawdown()
self.max_leverage = self.calculate_max_leverage()
def to_dict(self):
"""
Creates a dictionary representing the state of the risk report.
        Returns a dict object keyed by metric name.
"""
period_label = self.end_date.strftime("%Y-%m")
rval = {
'trading_days': self.num_trading_days,
'benchmark_volatility': self.benchmark_volatility,
'algo_volatility': self.algorithm_volatility,
'treasury_period_return': self.treasury_period_return,
'algorithm_period_return': self.algorithm_period_returns,
'benchmark_period_return': self.benchmark_period_returns,
'sharpe': self.sharpe,
'sortino': self.sortino,
'information': self.information,
'beta': self.beta,
'alpha': self.alpha,
'excess_return': self.excess_return,
'max_drawdown': self.max_drawdown,
'max_leverage': self.max_leverage,
'period_label': period_label
}
return {k: None if check_entry(k, v) else v
for k, v in iteritems(rval)}
def __repr__(self):
statements = []
metrics = [
"algorithm_period_returns",
"benchmark_period_returns",
"excess_return",
"num_trading_days",
"benchmark_volatility",
"algorithm_volatility",
"sharpe",
"sortino",
"information",
"algorithm_covariance",
"benchmark_variance",
"beta",
"alpha",
"max_drawdown",
"max_leverage",
"algorithm_returns",
"benchmark_returns",
"condition_number",
"eigen_values"
]
for metric in metrics:
value = getattr(self, metric)
statements.append("{m}:{v}".format(m=metric, v=value))
return '\n'.join(statements)
def mask_returns_to_period(self, daily_returns):
if isinstance(daily_returns, list):
returns = pd.Series([x.returns for x in daily_returns],
index=[x.date for x in daily_returns])
else: # otherwise we're receiving an index already
returns = daily_returns
trade_days = trading.environment.trading_days
trade_day_mask = returns.index.normalize().isin(trade_days)
mask = ((returns.index >= self.start_date) &
(returns.index <= self.end_date) & trade_day_mask)
returns = returns[mask]
return returns
def calculate_period_returns(self, returns):
period_returns = (1. + returns).prod() - 1
return period_returns
def calculate_volatility(self, daily_returns):
return np.std(daily_returns, ddof=1) * math.sqrt(self.num_trading_days)
def calculate_sharpe(self):
"""
http://en.wikipedia.org/wiki/Sharpe_ratio
"""
return sharpe_ratio(self.algorithm_volatility,
self.algorithm_period_returns,
self.treasury_period_return)
def calculate_sortino(self):
"""
http://en.wikipedia.org/wiki/Sortino_ratio
"""
mar = downside_risk(self.algorithm_returns,
self.mean_algorithm_returns,
self.num_trading_days)
# Hold on to downside risk for debugging purposes.
self.downside_risk = mar
return sortino_ratio(self.algorithm_period_returns,
self.treasury_period_return,
mar)
def calculate_information(self):
"""
http://en.wikipedia.org/wiki/Information_ratio
"""
return information_ratio(self.algorithm_returns,
self.benchmark_returns)
def calculate_beta(self):
"""
.. math::
\\beta_a = \\frac{\mathrm{Cov}(r_a,r_p)}{\mathrm{Var}(r_p)}
http://en.wikipedia.org/wiki/Beta_(finance)
"""
        # it doesn't make much sense to calculate beta for less than two days,
        # so return zeroed-out values.
if len(self.algorithm_returns) < 2:
return 0.0, 0.0, 0.0, 0.0, []
returns_matrix = np.vstack([self.algorithm_returns,
self.benchmark_returns])
C = np.cov(returns_matrix, ddof=1)
eigen_values = la.eigvals(C)
condition_number = max(eigen_values) / min(eigen_values)
algorithm_covariance = C[0][1]
benchmark_variance = C[1][1]
beta = algorithm_covariance / benchmark_variance
return (
beta,
algorithm_covariance,
benchmark_variance,
condition_number,
eigen_values
)
def calculate_alpha(self):
"""
http://en.wikipedia.org/wiki/Alpha_(investment)
"""
return alpha(self.algorithm_period_returns,
self.treasury_period_return,
self.benchmark_period_returns,
self.beta)
def calculate_max_drawdown(self):
compounded_returns = []
cur_return = 0.0
for r in self.algorithm_returns:
try:
cur_return += math.log(1.0 + r)
            # this is a guard against a single day returning -100% or worse:
            # for any return r <= -1.0, math.log(1.0 + r) is undefined and
            # raises a ValueError
except ValueError:
log.debug("{cur} return, zeroing the returns".format(
cur=cur_return))
cur_return = 0.0
compounded_returns.append(cur_return)
cur_max = None
max_drawdown = None
for cur in compounded_returns:
if cur_max is None or cur > cur_max:
cur_max = cur
drawdown = (cur - cur_max)
if max_drawdown is None or drawdown < max_drawdown:
max_drawdown = drawdown
if max_drawdown is None:
return 0.0
return 1.0 - math.exp(max_drawdown)
def calculate_max_leverage(self):
if self.algorithm_leverages is None:
return 0.0
else:
return max(self.algorithm_leverages)
def __getstate__(self):
state_dict = \
{k: v for k, v in iteritems(self.__dict__) if
(not k.startswith('_') and not k == 'treasury_curves')}
STATE_VERSION = 2
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 2
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("RiskMetricsPeriod saved state \
is too old.")
self.__dict__.update(state)
self.treasury_curves = trading.environment.treasury_curves
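# Hedged aside (added for illustration, not part of zipline): for a 1-D array
# of simple returns that are all greater than -100%, the loop in
# calculate_max_drawdown above is equivalent to this vectorized sketch.
def _max_drawdown_sketch(returns):
    """Vectorized max drawdown of an array of simple (non-log) returns."""
    compounded = np.cumsum(np.log1p(returns))        # cumulative log returns
    running_max = np.maximum.accumulate(compounded)  # best level seen so far
    return 1.0 - math.exp((compounded - running_max).min())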
| apache-2.0 |
toastedcornflakes/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 159 | 7852 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
ptitjano/bokeh | bokeh/core/compat/mplexporter/utils.py | 7 | 11497 | """
Utility Routines for Working with Matplotlib Objects
====================================================
"""
import itertools
import io
import base64
import numpy as np
import warnings
import matplotlib
from matplotlib.colors import colorConverter
from matplotlib.path import Path
from matplotlib.markers import MarkerStyle
from matplotlib.transforms import Affine2D
from matplotlib import ticker
# NOTE: bokeh mod
from bokeh.util.dependencies import import_optional
pd = import_optional('pandas')
def color_to_hex(color):
"""Convert matplotlib color code to hex color code"""
if color is None or colorConverter.to_rgba(color)[3] == 0:
return 'none'
else:
rgb = colorConverter.to_rgb(color)
return '#{0:02X}{1:02X}{2:02X}'.format(*(int(255 * c) for c in rgb))
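# Hedged usage sketch (added for illustration, not part of the original
# module): a few conversions produced by color_to_hex above. The function is
# defined but never called by the exporter.
def _color_to_hex_examples():
    """Return sample conversions; channel values are truncated to integers."""
    return [
        color_to_hex('red'),            # '#FF0000'
        color_to_hex((0.0, 0.5, 1.0)),  # '#007FFF'
        color_to_hex(None),             # 'none' (treated as fully transparent)
    ]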
def _many_to_one(input_dict):
"""Convert a many-to-one mapping to a one-to-one mapping"""
return dict((key, val)
for keys, val in input_dict.items()
for key in keys)
LINESTYLES = _many_to_one({('solid', '-', (None, None)): 'none',
('dashed', '--'): "6,6",
('dotted', ':'): "2,2",
('dashdot', '-.'): "4,4,2,4",
('', ' ', 'None', 'none'): None})
def get_dasharray(obj):
"""Get an SVG dash array for the given matplotlib linestyle
Parameters
----------
obj : matplotlib object
The matplotlib line or path object, which must have a get_linestyle()
method which returns a valid matplotlib line code
Returns
-------
dasharray : string
The HTML/SVG dasharray code associated with the object.
"""
if obj.__dict__.get('_dashSeq') is not None:
return ','.join(map(str, obj._dashSeq))
else:
ls = obj.get_linestyle()
dasharray = LINESTYLES.get(ls, 'not found')
if dasharray == 'not found':
warnings.warn("line style '{0}' not understood: "
"defaulting to solid line.".format(ls))
dasharray = LINESTYLES['solid']
return dasharray
PATH_DICT = {Path.LINETO: 'L',
Path.MOVETO: 'M',
Path.CURVE3: 'S',
Path.CURVE4: 'C',
Path.CLOSEPOLY: 'Z'}
def SVG_path(path, transform=None, simplify=False):
"""Construct the vertices and SVG codes for the path
Parameters
----------
path : matplotlib.Path object
transform : matplotlib transform (optional)
if specified, the path will be transformed before computing the output.
Returns
-------
vertices : array
The shape (M, 2) array of vertices of the Path. Note that some Path
codes require multiple vertices, so the length of these vertices may
be longer than the list of path codes.
path_codes : list
A length N list of single-character path codes, N <= M. Each code is
a single character, in ['L','M','S','C','Z']. See the standard SVG
path specification for a description of these.
"""
if transform is not None:
path = path.transformed(transform)
vc_tuples = [(vertices if path_code != Path.CLOSEPOLY else [],
PATH_DICT[path_code])
for (vertices, path_code)
in path.iter_segments(simplify=simplify)]
if not vc_tuples:
# empty path is a special case
return np.zeros((0, 2)), []
else:
vertices, codes = zip(*vc_tuples)
vertices = np.array(list(itertools.chain(*vertices))).reshape(-1, 2)
return vertices, list(codes)
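# Hedged usage sketch (added for illustration, not part of the original
# module): builds a small closed triangle and runs it through SVG_path above.
def _svg_path_example():
    """Return (vertices, codes) for a hand-built closed triangle."""
    triangle = Path([(0, 0), (1, 0), (0, 1), (0, 0)],
                    [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
    # Expected codes: ['M', 'L', 'L', 'Z']; the CLOSEPOLY segment contributes
    # no vertex, so the vertex array has shape (3, 2).
    return SVG_path(triangle)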
def get_path_style(path, fill=True):
"""Get the style dictionary for matplotlib path objects"""
style = {}
style['alpha'] = path.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['edgecolor'] = color_to_hex(path.get_edgecolor())
if fill:
style['facecolor'] = color_to_hex(path.get_facecolor())
else:
style['facecolor'] = 'none'
style['edgewidth'] = path.get_linewidth()
style['dasharray'] = get_dasharray(path)
style['zorder'] = path.get_zorder()
return style
def get_line_style(line):
"""Get the style dictionary for matplotlib line objects"""
style = {}
style['alpha'] = line.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['color'] = color_to_hex(line.get_color())
style['linewidth'] = line.get_linewidth()
style['dasharray'] = get_dasharray(line)
style['zorder'] = line.get_zorder()
return style
def get_marker_style(line):
"""Get the style dictionary for matplotlib marker objects"""
style = {}
style['alpha'] = line.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['facecolor'] = color_to_hex(line.get_markerfacecolor())
style['edgecolor'] = color_to_hex(line.get_markeredgecolor())
style['edgewidth'] = line.get_markeredgewidth()
style['marker'] = line.get_marker()
markerstyle = MarkerStyle(line.get_marker())
markersize = line.get_markersize()
markertransform = (markerstyle.get_transform()
+ Affine2D().scale(markersize, -markersize))
style['markerpath'] = SVG_path(markerstyle.get_path(),
markertransform)
style['markersize'] = markersize
style['zorder'] = line.get_zorder()
return style
def get_text_style(text):
"""Return the text style dict for a text instance"""
style = {}
style['alpha'] = text.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['fontsize'] = text.get_size()
style['color'] = color_to_hex(text.get_color())
style['halign'] = text.get_horizontalalignment() # left, center, right
style['valign'] = text.get_verticalalignment() # baseline, center, top
style['malign'] = text._multialignment # text alignment when '\n' in text
style['rotation'] = text.get_rotation()
style['zorder'] = text.get_zorder()
return style
def get_axis_properties(axis):
"""Return the property dictionary for a matplotlib.Axis instance"""
props = {}
label1On = axis._major_tick_kw.get('label1On', True)
if isinstance(axis, matplotlib.axis.XAxis):
if label1On:
props['position'] = "bottom"
else:
props['position'] = "top"
elif isinstance(axis, matplotlib.axis.YAxis):
if label1On:
props['position'] = "left"
else:
props['position'] = "right"
else:
raise ValueError("{0} should be an Axis instance".format(axis))
# Use tick values if appropriate
locator = axis.get_major_locator()
props['nticks'] = len(locator())
if isinstance(locator, ticker.FixedLocator):
props['tickvalues'] = list(locator())
else:
props['tickvalues'] = None
# Find tick formats
formatter = axis.get_major_formatter()
if isinstance(formatter, ticker.NullFormatter):
props['tickformat'] = ""
elif isinstance(formatter, ticker.FixedFormatter):
props['tickformat'] = list(formatter.seq)
elif not any(label.get_visible() for label in axis.get_ticklabels()):
props['tickformat'] = ""
else:
props['tickformat'] = None
# Get axis scale
props['scale'] = axis.get_scale()
# Get major tick label size (assumes that's all we really care about!)
labels = axis.get_ticklabels()
if labels:
props['fontsize'] = labels[0].get_fontsize()
else:
props['fontsize'] = None
# Get associated grid
props['grid'] = get_grid_style(axis)
# get axis visibility
props['visible'] = axis.get_visible()
return props
def get_grid_style(axis):
gridlines = axis.get_gridlines()
if axis._gridOnMajor and len(gridlines) > 0:
color = color_to_hex(gridlines[0].get_color())
alpha = gridlines[0].get_alpha()
dasharray = get_dasharray(gridlines[0])
return dict(gridOn=True,
color=color,
dasharray=dasharray,
alpha=alpha)
else:
return {"gridOn": False}
def get_figure_properties(fig):
return {'figwidth': fig.get_figwidth(),
'figheight': fig.get_figheight(),
'dpi': fig.dpi}
def get_axes_properties(ax):
props = {'axesbg': color_to_hex(ax.patch.get_facecolor()),
'axesbgalpha': ax.patch.get_alpha(),
'bounds': ax.get_position().bounds,
'dynamic': ax.get_navigate(),
'axison': ax.axison,
'frame_on': ax.get_frame_on(),
'patch_visible':ax.patch.get_visible(),
'axes': [get_axis_properties(ax.xaxis),
get_axis_properties(ax.yaxis)]}
for axname in ['x', 'y']:
axis = getattr(ax, axname + 'axis')
domain = getattr(ax, 'get_{0}lim'.format(axname))()
lim = domain
if isinstance(axis.converter, matplotlib.dates.DateConverter):
scale = 'date'
if pd and isinstance(axis.converter, pd.tseries.converter.PeriodConverter):
_dates = [pd.Period(ordinal=int(d), freq=axis.freq)
for d in domain]
domain = [(d.year, d.month - 1, d.day,
d.hour, d.minute, d.second, 0)
for d in _dates]
else:
domain = [(d.year, d.month - 1, d.day,
d.hour, d.minute, d.second,
d.microsecond * 1E-3)
for d in matplotlib.dates.num2date(domain)]
else:
scale = axis.get_scale()
if scale not in ['date', 'linear', 'log']:
raise ValueError("Unknown axis scale: "
"{0}".format(axis.get_scale()))
props[axname + 'scale'] = scale
props[axname + 'lim'] = lim
props[axname + 'domain'] = domain
return props
def iter_all_children(obj, skipContainers=False):
"""
Returns an iterator over all children and nested children using
obj's get_children() method
if skipContainers is true, only childless objects are returned.
"""
if hasattr(obj, 'get_children') and len(obj.get_children()) > 0:
for child in obj.get_children():
if not skipContainers:
yield child
# could use `yield from` in python 3...
for grandchild in iter_all_children(child, skipContainers):
yield grandchild
else:
yield obj
def get_legend_properties(ax, legend):
handles, labels = ax.get_legend_handles_labels()
visible = legend.get_visible()
return {'handles': handles, 'labels': labels, 'visible': visible}
def image_to_base64(image):
"""
Convert a matplotlib image to a base64 png representation
Parameters
----------
image : matplotlib image object
The image to be converted.
Returns
-------
image_base64 : string
The UTF8-encoded base64 string representation of the png image.
"""
ax = image.axes
binary_buffer = io.BytesIO()
# image is saved in axes coordinates: we need to temporarily
# set the correct limits to get the correct image
lim = ax.axis()
ax.axis(image.get_extent())
image.write_png(binary_buffer)
ax.axis(lim)
binary_buffer.seek(0)
return base64.b64encode(binary_buffer.read()).decode('utf-8')
| bsd-3-clause |
jmetzen/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 65 | 5529 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative formula from ESLII
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float64)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float64)
y.fill(0.0)
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float64)
y.fill(1.0)
sw = np.ones(102, dtype=np.float64)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/io/sas.py | 9 | 14476 | """
Tools for reading SAS XPort files into Pandas objects.
Based on code from Jack Cushman (github.com/jcushman/xport).
The file format is defined here:
https://support.sas.com/techsup/technote/ts140.pdf
"""
from datetime import datetime
import pandas as pd
from pandas.io.common import get_filepath_or_buffer
from pandas import compat
import struct
import numpy as np
from pandas.util.decorators import Appender
_correct_line1 = "HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!000000000000000000000000000000 "
_correct_header1 = "HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!000000000000000001600000000"
_correct_header2 = "HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!000000000000000000000000000000 "
_correct_obs_header = "HEADER RECORD*******OBS HEADER RECORD!!!!!!!000000000000000000000000000000 "
_fieldkeys = ['ntype', 'nhfun', 'field_length', 'nvar0', 'name', 'label',
'nform', 'nfl', 'num_decimals', 'nfj', 'nfill', 'niform',
'nifl', 'nifd', 'npos', '_']
# TODO: Support for 4 byte floats, see https://github.com/jcushman/xport/pull/3
# Need a test file
_base_params_doc = """\
Parameters
----------
filepath_or_buffer : string or file-like object
Path to SAS file or object implementing binary read method."""
_params2_doc = """\
index : identifier of index column
Identifier of column that should be used as index of the DataFrame.
encoding : string
Encoding for text data.
chunksize : int
Read file `chunksize` lines at a time, returns iterator."""
_format_params_doc = """\
format : string
File format, only `xport` is currently supported."""
_iterator_doc = """\
iterator : boolean, default False
Return XportReader object for reading file incrementally."""
_read_sas_doc = """Read a SAS file into a DataFrame.
%(_base_params_doc)s
%(_format_params_doc)s
%(_params2_doc)s
%(_iterator_doc)s
Returns
-------
DataFrame or XportReader
Examples
--------
Read a SAS Xport file:
>>> df = pandas.read_sas('filename.XPT')
Read an Xport file in 10,000 line chunks:
>>> itr = pandas.read_sas('filename.XPT', chunksize=10000)
>>> for chunk in itr:
>>> do_something(chunk)
.. versionadded:: 0.17.0
""" % {"_base_params_doc": _base_params_doc,
"_format_params_doc": _format_params_doc,
"_params2_doc": _params2_doc,
"_iterator_doc": _iterator_doc}
_xport_reader_doc = """\
Class for reading SAS Xport files.
%(_base_params_doc)s
%(_params2_doc)s
Attributes
----------
member_info : list
Contains information about the file
fields : list
Contains information about the variables in the file
""" % {"_base_params_doc": _base_params_doc,
"_params2_doc": _params2_doc}
_read_method_doc = """\
Read observations from SAS Xport file, returning as data frame.
Parameters
----------
nrows : int
Number of rows to read from data file; if None, read whole
file.
Returns
-------
A DataFrame.
"""
@Appender(_read_sas_doc)
def read_sas(filepath_or_buffer, format='xport', index=None, encoding='ISO-8859-1',
chunksize=None, iterator=False):
format = format.lower()
if format == 'xport':
reader = XportReader(filepath_or_buffer, index=index, encoding=encoding,
chunksize=chunksize)
else:
raise ValueError('only xport format is supported')
if iterator or chunksize:
return reader
return reader.read()
def _parse_date(datestr):
""" Given a date in xport format, return Python date. """
try:
return datetime.strptime(datestr, "%d%b%y:%H:%M:%S") # e.g. "16FEB11:10:07:55"
except ValueError:
return pd.NaT
def _split_line(s, parts):
"""
Parameters
----------
s: string
Fixed-length string to split
parts: list of (name, length) pairs
Used to break up string, name '_' will be filtered from output.
Returns
-------
Dict of name:contents of string at given location.
"""
out = {}
start = 0
for name, length in parts:
out[name] = s[start:start+length].strip()
start += length
del out['_']
return out
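# Illustration (added; not in the original source): splitting a 16-character
# fixed-width fragment with _split_line; the chunk named '_' is discarded.
def _example_split_line():  # illustration only
    out = _split_line("SAS     DEMO    ", [['prefix', 8], ['_', 8]])
    # out == {'prefix': 'SAS'}
    return out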
def _parse_float_vec(vec):
"""
Parse a vector of 8-byte values representing IBM 8 byte floats
into native 8 byte floats.
"""
dtype = np.dtype('>u4,>u4')
vec1 = vec.view(dtype=dtype)
xport1 = vec1['f0']
xport2 = vec1['f1']
# Start by setting first half of ieee number to first half of IBM
# number sans exponent
ieee1 = xport1 & 0x00ffffff
# Get the second half of the ibm number into the second half of
# the ieee number
ieee2 = xport2
# The fraction bit to the left of the binary point in the ieee
# format was set and the number was shifted 0, 1, 2, or 3
# places. This will tell us how to adjust the ibm exponent to be a
# power of 2 ieee exponent and how to shift the fraction bits to
# restore the correct magnitude.
shift = np.zeros(len(vec), dtype=np.uint8)
shift[np.where(xport1 & 0x00200000)] = 1
shift[np.where(xport1 & 0x00400000)] = 2
shift[np.where(xport1 & 0x00800000)] = 3
# shift the ieee number down the correct number of places then
# set the second half of the ieee number to be the second half
# of the ibm number shifted appropriately, ored with the bits
# from the first half that would have been shifted in if we
# could shift a double. All we are worried about are the low
# order 3 bits of the first half since we're only shifting by
# 1, 2, or 3.
ieee1 >>= shift
ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift)))
# clear the 1 bit to the left of the binary point
ieee1 &= 0xffefffff
# set the exponent of the ieee number to be the actual exponent
# plus the shift count + 1023. Or this into the first half of the
# ieee number. The ibm exponent is excess 64 but is adjusted by 65
# since during conversion to ibm format the exponent is
# incremented by 1 and the fraction bits left 4 positions to the
# right of the radix point. (had to add >> 24 because C treats &
# 0x7f as 0x7f000000 and Python doesn't)
ieee1 |= ((((((xport1 >> 24) & 0x7f) - 65) << 2) + shift + 1023) << 20) | (xport1 & 0x80000000)
ieee = np.empty((len(ieee1),), dtype='>u4,>u4')
ieee['f0'] = ieee1
ieee['f1'] = ieee2
ieee = ieee.view(dtype='>f8')
ieee = ieee.astype('f8')
return ieee
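# Hand-checked illustration (added; not in the original source): 1.0 in IBM
# hexadecimal floating point is 0x4110000000000000 (excess-64 exponent 0x41,
# fraction 0x10...0), so parsing that big-endian pattern should give 1.0 back.
def _example_parse_float_vec():  # illustration only
    vec = np.array([0x4110000000000000], dtype=">u8")
    return _parse_float_vec(vec)  # -> array([ 1.])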
class XportReader(object):
__doc__ = _xport_reader_doc
def __init__(self, filepath_or_buffer, index=None, encoding='ISO-8859-1',
chunksize=None):
self._encoding = encoding
self._lines_read = 0
self._index = index
self._chunksize = chunksize
if isinstance(filepath_or_buffer, str):
filepath_or_buffer, encoding, compression = get_filepath_or_buffer(
filepath_or_buffer, encoding=encoding)
if isinstance(filepath_or_buffer, (str, compat.text_type, bytes)):
self.filepath_or_buffer = open(filepath_or_buffer, 'rb')
else:
# Copy to BytesIO, and ensure no encoding
contents = filepath_or_buffer.read()
try:
contents = contents.encode(self._encoding)
except:
pass
self.filepath_or_buffer = compat.BytesIO(contents)
self._read_header()
def _get_row(self):
return self.filepath_or_buffer.read(80).decode()
def _read_header(self):
self.filepath_or_buffer.seek(0)
# read file header
line1 = self._get_row()
if line1 != _correct_line1:
raise ValueError("Header record is not an XPORT file.")
line2 = self._get_row()
file_info = _split_line(line2, [ ['prefix',24], ['version',8], ['OS',8], ['_',24], ['created',16]])
if file_info['prefix'] != "SAS SAS SASLIB":
raise ValueError("Header record has invalid prefix.")
file_info['created'] = _parse_date(file_info['created'])
self.file_info = file_info
line3 = self._get_row()
file_info['modified'] = _parse_date(line3[:16])
# read member header
header1 = self._get_row()
header2 = self._get_row()
if not header1.startswith(_correct_header1) or not header2 == _correct_header2:
raise ValueError("Member header not found.")
fieldnamelength = int(header1[-5:-2]) # usually 140, could be 135
# member info
member_info = _split_line(self._get_row(), [['prefix',8], ['set_name',8],
['sasdata',8],['version',8],
['OS',8],['_',24],['created',16]])
member_info.update( _split_line(self._get_row(), [['modified',16], ['_',16],
['label',40],['type',8]]))
member_info['modified'] = _parse_date(member_info['modified'])
member_info['created'] = _parse_date(member_info['created'])
self.member_info = member_info
# read field names
types = {1: 'numeric', 2: 'char'}
fieldcount = int(self._get_row()[54:58])
datalength = fieldnamelength*fieldcount
if datalength % 80: # round up to nearest 80
datalength += 80 - datalength%80
fielddata = self.filepath_or_buffer.read(datalength)
fields = []
obs_length = 0
while len(fielddata) >= fieldnamelength:
# pull data for one field
field, fielddata = (fielddata[:fieldnamelength], fielddata[fieldnamelength:])
# rest at end gets ignored, so if field is short, pad out
# to match struct pattern below
field = field.ljust(140)
fieldstruct = struct.unpack('>hhhh8s40s8shhh2s8shhl52s', field)
field = dict(zip(_fieldkeys, fieldstruct))
del field['_']
field['ntype'] = types[field['ntype']]
if field['ntype'] == 'numeric' and field['field_length'] != 8:
raise TypeError("Only 8-byte floats are currently implemented. Can't read field %s." % field)
for k, v in field.items():
try:
field[k] = v.strip()
except AttributeError:
pass
obs_length += field['field_length']
fields += [field]
header = self._get_row()
if not header == _correct_obs_header:
raise ValueError("Observation header not found.")
self.fields = fields
self.record_length = obs_length
self.record_start = self.filepath_or_buffer.tell()
self.nobs = self._record_count()
self.columns = [x['name'].decode() for x in self.fields]
# Setup the dtype.
dtypel = []
for i,field in enumerate(self.fields):
ntype = field['ntype']
if ntype == "numeric":
dtypel.append(('s' + str(i), ">u8"))
elif ntype == "char":
dtypel.append(('s' + str(i), "S" + str(field['field_length'])))
dtype = np.dtype(dtypel)
self._dtype = dtype
def __iter__(self):
try:
if self._chunksize:
while True:
yield self.read(self._chunksize)
else:
yield self.read()
except StopIteration:
pass
def _record_count(self):
"""
Get number of records in file.
        This may be suboptimal because we have to seek to the end of the file.
Side effect: returns file position to record_start.
"""
self.filepath_or_buffer.seek(0, 2)
total_records_length = self.filepath_or_buffer.tell() - self.record_start
if total_records_length % 80 != 0:
warnings.warn("xport file may be corrupted")
if self.record_length > 80:
self.filepath_or_buffer.seek(self.record_start)
return total_records_length // self.record_length
self.filepath_or_buffer.seek(-80, 2)
last_card = self.filepath_or_buffer.read(80)
last_card = np.frombuffer(last_card, dtype=np.uint64)
# 8 byte blank
ix = np.flatnonzero(last_card == 2314885530818453536)
if len(ix) == 0:
tail_pad = 0
else:
tail_pad = 8 * len(ix)
self.filepath_or_buffer.seek(self.record_start)
return (total_records_length - tail_pad) // self.record_length
def get_chunk(self, size=None):
"""
Reads lines from Xport file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
def _missing_double(self, vec):
v = vec.view(dtype='u1,u1,u2,u4')
miss = (v['f1'] == 0) & (v['f2'] == 0) & (v['f3'] == 0)
miss1 = ((v['f0'] >= 0x41) & (v['f0'] <= 0x5a)) |\
(v['f0'] == 0x5f) | (v['f0'] == 0x2e)
miss &= miss1
return miss
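    # Note (added for clarity): a numeric SAS missing value such as ".A" is
    # stored as the ASCII code of a letter (0x41-0x5A), "_" (0x5F) or "." (0x2E)
    # in the first byte followed by seven zero bytes, which is the pattern the
    # checks above detect.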
@Appender(_read_method_doc)
def read(self, nrows=None):
if nrows is None:
nrows = self.nobs
read_lines = min(nrows, self.nobs - self._lines_read)
read_len = read_lines * self.record_length
if read_len <= 0:
raise StopIteration
raw = self.filepath_or_buffer.read(read_len)
data = np.frombuffer(raw, dtype=self._dtype, count=read_lines)
df = pd.DataFrame(index=range(read_lines))
for j,x in enumerate(self.columns):
vec = data['s%d' % j]
ntype = self.fields[j]['ntype']
if ntype == "numeric":
miss = self._missing_double(vec)
v = _parse_float_vec(vec)
v[miss] = np.nan
elif self.fields[j]['ntype'] == 'char':
v = [y.rstrip() for y in vec]
if compat.PY3:
v = [y.decode(self._encoding) for y in v]
df[x] = v
if self._index is None:
df.index = range(self._lines_read, self._lines_read + read_lines)
else:
df = df.set_index(self._index)
self._lines_read += read_lines
return df
| gpl-2.0 |
asreimer/davitpy_asr | models/tsyganenko/__init__.py | 3 | 19327 | # Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
"""
*********************
**Module**: models.tsyganenko
*********************
This module contains the following object(s):
* :class:`models.tsyganenko.tsygTrace`: Wraps fortran subroutines in one convenient class
This module contains the following module(s):
* :mod:`models.tsyganenko.tsygFort`: Fortran subroutines
*******************************
"""
import tsygFort
class tsygTrace(object):
def __init__(self, lat=None, lon=None, rho=None, filename=None,
coords='geo', datetime=None,
vswgse=[-400.,0.,0.], pdyn=2., dst=-5., byimf=0., bzimf=-5.,
lmax=5000, rmax=60., rmin=1., dsmax=0.01, err=0.000001):
"""
| **PACKAGE**: models.tsyganenko.trace
| **FUNCTION**: trace(lat, lon, rho, coords='geo', datetime=None,
| vswgse=[-400.,0.,0.], Pdyn=2., Dst=-5., ByIMF=0., BzIMF=-5.
| lmax=5000, rmax=60., rmin=1., dsmax=0.01, err=0.000001)
| **PURPOSE**: trace magnetic field line(s) from point(s)
|
| **INPUTS**:
| **lat**: latitude [degrees]
| **lon**: longitude [degrees]
| **rho**: distance from center of the Earth [km]
| **filename**: load a trace object directly from a file
| **[coords]**: coordinates used for start point ['geo']
| **[datetime]**: a python datetime object
| **[vswgse]**: solar wind velocity in GSE coordinates [m/s, m/s, m/s]
| **[pdyn]**: solar wind dynamic pressure [nPa]
| **[dst]**: Dst index [nT]
| **[byimf]**: IMF By [nT]
| **[bzimf]**: IMF Bz [nT]
| **[lmax]**: maximum number of points to trace
| **[rmax]**: upper trace boundary in Re
| **[rmin]**: lower trace boundary in Re
| **[dsmax]**: maximum tracing step size
| **[err]**: tracing step tolerance
|
| **OUTPUTS**:
| Elements of this object:
        | **.lat[N/S]H**: latitude of the trace footpoint in Northern/Southern hemisphere
        | **.lon[N/S]H**: longitude of the trace footpoint in Northern/Southern hemisphere
        | **.rho[N/S]H**: distance of the trace footpoint in Northern/Southern hemisphere
|
| **EXAMPLES**:
from numpy import arange, zeros, ones
import tsyganenko
# trace a series of points
lats = arange(10, 90, 10)
lons = zeros(len(lats))
rhos = 6372.*ones(len(lats))
trace = tsyganenko.tsygTrace(lats, lons, rhos)
# Print the results nicely
print trace
# Plot the traced field lines
ax = trace.plot()
# Or generate a 3d view of the traced field lines
ax = trace.plot3d()
# Save your trace to a file for later use
trace.save('trace.dat')
# And when you want to re-use the saved trace
trace = tsyganenko.tsygTrace(filename='trace.dat')
|
| Written by Sebastien 2012-10
"""
from datetime import datetime as pydt
assert (None not in [lat, lon, rho]) or filename, 'You must provide either (lat, lon, rho) or a filename to read from'
if None not in [lat, lon, rho]:
self.lat = lat
self.lon = lon
self.rho = rho
self.coords = coords
self.vswgse = vswgse
self.pdyn = pdyn
self.dst = dst
self.byimf = byimf
self.bzimf = bzimf
# If no datetime is provided, defaults to today
if datetime==None: datetime = pydt.utcnow()
self.datetime = datetime
iTest = self.__test_valid__()
if not iTest: self.__del__()
self.trace()
elif filename:
self.load(filename)
def __test_valid__(self):
"""
| Test the validity of input arguments to the tsygTrace class and trace method
|
| Written by Sebastien 2012-10
"""
assert (len(self.vswgse) == 3), 'vswgse must have 3 elements'
        assert (self.coords.lower() == 'geo'), '{}: this coordinate system is not supported'.format(self.coords.lower())
# A provision for those who want to batch trace
try:
[l for l in self.lat]
except:
self.lat = [self.lat]
try:
[l for l in self.lon]
except:
self.lon = [self.lon]
try:
[r for r in self.rho]
except:
self.rho = [self.rho]
try:
[d for d in self.datetime]
except:
self.datetime = [self.datetime for l in self.lat]
        # Make sure they're all the same length
assert (len(self.lat) == len(self.lon) == len(self.rho) == len(self.datetime)), \
            'lat, lon, rho and datetime must be the same length'
return True
def trace(self, lat=None, lon=None, rho=None, coords=None, datetime=None,
vswgse=None, pdyn=None, dst=None, byimf=None, bzimf=None,
lmax=5000, rmax=60., rmin=1., dsmax=0.01, err=0.000001):
"""
| See tsygTrace for a description of each parameter
| Any unspecified parameter default to the one stored in the object
| Unspecified lmax, rmax, rmin, dsmax, err has a set default value
|
| Written by Sebastien 2012-10
"""
from numpy import radians, degrees, zeros
# Store existing values of class attributes in case something is wrong
# and we need to revert back to them
if lat: _lat = self.lat
if lon: _lon = self.lon
if rho: _rho = self.rho
if coords: _coords = self.coords
if vswgse: _vswgse = self.vswgse
if not datetime==None: _datetime = self.datetime
# Pass position if new
if lat: self.lat = lat
lat = self.lat
if lon: self.lon = lon
lon = self.lon
if rho: self.rho = rho
rho = self.rho
if not datetime==None: self.datetime = datetime
datetime = self.datetime
# Set necessary parameters if new
if coords: self.coords = coords
coords = self.coords
if not datetime==None: self.datetime = datetime
datetime = self.datetime
if vswgse: self.vswgse = vswgse
vswgse = self.vswgse
if pdyn: self.pdyn = pdyn
pdyn = self.pdyn
if dst: self.dst = dst
dst = self.dst
if byimf: self.byimf = byimf
byimf = self.byimf
if bzimf: self.bzimf = bzimf
bzimf = self.bzimf
# Test that everything is in order, if not revert to existing values
iTest = self.__test_valid__()
if not iTest:
if lat: self.lat = _lat
            if lon: self.lon = _lon
if rho: self.rho = _rho
if coords: self.coords = _coords
if vswgse: self.vswgse = _vswgse
if not datetime==None: self.datetime = _datetime
# Declare the same Re as used in Tsyganenko models [km]
Re = 6371.2
# Initialize trace array
self.l = zeros(len(lat))
self.xTrace = zeros((len(lat),2*lmax))
self.yTrace = self.xTrace.copy()
self.zTrace = self.xTrace.copy()
self.xGsw = self.l.copy()
self.yGsw = self.l.copy()
self.zGsw = self.l.copy()
self.latNH = self.l.copy()
self.lonNH = self.l.copy()
self.rhoNH = self.l.copy()
self.latSH = self.l.copy()
self.lonSH = self.l.copy()
self.rhoSH = self.l.copy()
# And now iterate through the desired points
for ip in xrange(len(lat)):
# This has to be called first
tsygFort.recalc_08(datetime[ip].year,datetime[ip].timetuple().tm_yday,
datetime[ip].hour,datetime[ip].minute,datetime[ip].second,
vswgse[0],vswgse[1],vswgse[2])
# Convert lat,lon to geographic cartesian and then gsw
r, theta, phi, xgeo, ygeo, zgeo = tsygFort.sphcar_08(
rho[ip]/Re, radians(90.-lat[ip]), radians(lon[ip]),
0., 0., 0.,
1)
if coords.lower() == 'geo':
xgeo, ygeo, zgeo, xgsw, ygsw, zgsw = tsygFort.geogsw_08(
xgeo, ygeo, zgeo,
0. ,0. ,0. ,
1)
self.xGsw[ip] = xgsw
self.yGsw[ip] = ygsw
self.zGsw[ip] = zgsw
# Trace field line
inmod = 'IGRF_GSW_08'
exmod = 'T96_01'
parmod = [pdyn, dst, byimf, bzimf, 0, 0, 0, 0, 0, 0]
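            # Note (added for clarity): the T96 model uses only the first four
            # PARMOD entries (Pdyn, Dst, ByIMF, BzIMF); the remaining slots are
            # unused placeholders.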
# First towards southern hemisphere
maptoL = [-1, 1]
for mapto in maptoL:
xfgsw, yfgsw, zfgsw, xarr, yarr, zarr, l = tsygFort.trace_08( xgsw, ygsw, zgsw,
mapto, dsmax, err, rmax, rmin, 0,
parmod, exmod, inmod,
lmax )
# Convert back to spherical geographic coords
xfgeo, yfgeo, zfgeo, xfgsw, yfgsw, zfgsw = tsygFort.geogsw_08(
0. ,0. ,0. ,
xfgsw, yfgsw, zfgsw,
-1)
geoR, geoColat, geoLon, xgeo, ygeo, zgeo = tsygFort.sphcar_08(
0., 0., 0.,
xfgeo, yfgeo, zfgeo,
-1)
# Get coordinates of traced point
if mapto == 1:
self.latSH[ip] = 90. - degrees(geoColat)
self.lonSH[ip] = degrees(geoLon)
self.rhoSH[ip] = geoR*Re
elif mapto == -1:
self.latNH[ip] = 90. - degrees(geoColat)
self.lonNH[ip] = degrees(geoLon)
self.rhoNH[ip] = geoR*Re
# Store trace
if mapto == -1:
self.xTrace[ip,0:l] = xarr[l-1::-1]
self.yTrace[ip,0:l] = yarr[l-1::-1]
self.zTrace[ip,0:l] = zarr[l-1::-1]
elif mapto == 1:
self.xTrace[ip,self.l[ip]:self.l[ip]+l] = xarr[0:l]
self.yTrace[ip,self.l[ip]:self.l[ip]+l] = yarr[0:l]
self.zTrace[ip,self.l[ip]:self.l[ip]+l] = zarr[0:l]
self.l[ip] += l
        # Resize trace output to the minimum possible length
self.xTrace = self.xTrace[:,0:self.l.max()]
self.yTrace = self.yTrace[:,0:self.l.max()]
self.zTrace = self.zTrace[:,0:self.l.max()]
def __str__(self):
"""
| Print object information in a nice way
|
| Written by Sebastien 2012-10
"""
# Declare print format
outstr = '''
vswgse=[{:6.0f},{:6.0f},{:6.0f}] [m/s]
pdyn={:3.0f} [nPa]
dst={:3.0f} [nT]
byimf={:3.0f} [nT]
bzimf={:3.0f} [nT]
'''.format(self.vswgse[0],
self.vswgse[1],
self.vswgse[2],
self.pdyn,
self.dst,
self.byimf,
self.bzimf)
outstr += '\nCoords: {}\n'.format(self.coords)
outstr += '(latitude [degrees], longitude [degrees], distance from center of the Earth [km])\n'
# Print stuff
for ip in xrange(len(self.lat)):
outstr += '''
({:6.3f}, {:6.3f}, {:6.3f}) @ {}
--> NH({:6.3f}, {:6.3f}, {:6.3f})
--> SH({:6.3f}, {:6.3f}, {:6.3f})
'''.format(self.lat[ip], self.lon[ip], self.rho[ip],
self.datetime[ip].strftime('%H:%M UT (%d-%b-%y)'),
self.latNH[ip], self.lonNH[ip], self.rhoNH[ip],
self.latSH[ip], self.lonSH[ip], self.rhoSH[ip])
return outstr
def save(self, filename):
"""
| Save trace information to a file
|
| Written by Sebastien 2012-10
"""
import cPickle as pickle
with open( filename, "wb" ) as fileObj:
pickle.dump(self, fileObj)
def load(self, filename):
"""
| load trace information from a file
|
| Written by Sebastien 2012-10
"""
import cPickle as pickle
with open( filename, "rb" ) as fileObj:
obj = pickle.load(fileObj)
for k, v in obj.__dict__.items():
self.__dict__[k] = v
def plot(self, proj='xz', color='b', onlyPts=None, showPts=False,
showEarth=True, disp=True, **kwargs):
"""
| Generate a 2D plot of the trace projected onto a given plane
| Graphic keywords apply to the plot method for the field lines
|
| **INPUTS**:
        | **proj**: the projection plane in GSW coordinates
        | **onlyPts**: if the trace contains multiple points, only show the specified indices (list)
| **showEarth**: Toggle Earth disk visibility on/off
| **showPts**: Toggle start points visibility on/off
| **disp**: invoke pylab.show()
| **color**: field line color
| **kwargs**: see matplotlib.axes.Axes.plot
|
| **OUTPUTS**:
| **ax**: matplotlib axes object
|
| Written by Sebastien 2012-10
"""
from pylab import gcf, gca, show
from matplotlib.patches import Circle
from numpy import pi, linspace, outer, ones, size, cos, sin, radians, cross
from numpy.ma import masked_array
        assert (len(proj) == 2) and \
            (proj[0] in ['x','y','z'] and proj[1] in ['x','y','z']) and \
            (proj[0] != proj[1]), 'Invalid projection plane'
fig = gcf()
ax = fig.gca()
ax.set_aspect('equal')
# First plot a nice disk for the Earth
if showEarth:
circ = Circle(xy=(0,0), radius=1, facecolor='0.8', edgecolor='k', alpha=.5, zorder=0)
ax.add_patch(circ)
# Select indices to show
if onlyPts is None:
inds = xrange(len(self.lat))
else:
try:
inds = [ip for ip in onlyPts]
except:
inds = [onlyPts]
# Then plot the traced field line
for ip in inds:
# Select projection plane
if proj[0] == 'x':
xx = self.xTrace[ip,0:self.l[ip]]
xpt = self.xGsw[ip]
ax.set_xlabel(r'$X_{GSW}$')
xdir = [1,0,0]
elif proj[0] == 'y':
xx = self.yTrace[ip,0:self.l[ip]]
xpt = self.yGsw[ip]
ax.set_xlabel(r'$Y_{GSW}$')
xdir = [0,1,0]
elif proj[0] == 'z':
xx = self.zTrace[ip,0:self.l[ip]]
xpt = self.zGsw[ip]
ax.set_xlabel(r'$Z_{GSW}$')
xdir = [0,0,1]
if proj[1] == 'x':
yy = self.xTrace[ip,0:self.l[ip]]
ypt = self.xGsw[ip]
ax.set_ylabel(r'$X_{GSW}$')
ydir = [1,0,0]
elif proj[1] == 'y':
yy = self.yTrace[ip,0:self.l[ip]]
ypt = self.yGsw[ip]
ax.set_ylabel(r'$Y_{GSW}$')
ydir = [0,1,0]
elif proj[1] == 'z':
yy = self.zTrace[ip,0:self.l[ip]]
ypt = self.zGsw[ip]
ax.set_ylabel(r'$Z_{GSW}$')
ydir = [0,0,1]
sign = 1 if -1 not in cross(xdir,ydir) else -1
if 'x' not in proj:
zz = sign*self.xGsw[ip]
indMask = sign*self.xTrace[ip,0:self.l[ip]] < 0
if 'y' not in proj:
zz = sign*self.yGsw[ip]
indMask = sign*self.yTrace[ip,0:self.l[ip]] < 0
if 'z' not in proj:
zz = sign*self.zGsw[ip]
indMask = sign*self.zTrace[ip,0:self.l[ip]] < 0
# Plot
ax.plot(masked_array(xx, mask=~indMask),
masked_array(yy, mask=~indMask),
zorder=-1, color=color, **kwargs)
ax.plot(masked_array(xx, mask=indMask),
masked_array(yy, mask=indMask),
zorder=1, color=color, **kwargs)
if showPts:
ax.scatter(xpt, ypt, c='k', s=40, zorder=zz)
if disp: show()
return ax
def plot3d(self, onlyPts=None, showEarth=True, showPts=False, disp=True,
xyzlim=None, zorder=1, linewidth=2, color='b', **kwargs):
"""
| Generate a 3D plot of the trace
| Graphic keywords apply to the plot3d method for the field lines
|
| **INPUTS**:
        | **onlyPts**: if the trace contains multiple points, only show the specified indices (list)
| **showEarth**: Toggle Earth sphere visibility on/off
| **showPts**: Toggle start points visibility on/off
| **disp**: invoke pylab.show()
| **xyzlim**: 3D axis limits
| **zorder**: 3D layers ordering
| **linewidth**: field line width
| **color**: field line color
| **kwargs**: see mpl_toolkits.mplot3d.axes3d.Axes3D.plot3D
|
| **OUTPUTS**:
| **ax**: matplotlib axes object
|
| Written by Sebastien 2012-10
"""
from mpl_toolkits.mplot3d import proj3d
from numpy import pi, linspace, outer, ones, size, cos, sin, radians
from pylab import gca, gcf, show
fig = gcf()
ax = fig.gca(projection='3d')
# First plot a nice sphere for the Earth
if showEarth:
u = linspace(0, 2 * pi, 179)
v = linspace(0, pi, 179)
tx = outer(cos(u), sin(v))
ty = outer(sin(u), sin(v))
tz = outer(ones(size(u)), cos(v))
ax.plot_surface(tx,ty,tz,rstride=10, cstride=10, color='grey', alpha=.5, zorder=0, linewidth=0.5)
# Select indices to show
if onlyPts is None:
inds = xrange(len(self.lat))
else:
try:
inds = [ip for ip in onlyPts]
except:
inds = [onlyPts]
# Then plot the traced field line
for ip in inds:
ax.plot3D( self.xTrace[ip,0:self.l[ip]],
self.yTrace[ip,0:self.l[ip]],
self.zTrace[ip,0:self.l[ip]],
zorder=zorder, linewidth=linewidth, color=color, **kwargs)
if showPts:
ax.scatter3D(self.xGsw[ip], self.yGsw[ip], self.zGsw[ip], c='k')
# Set plot limits
if not xyzlim:
xyzlim = max( [ ax.get_xlim3d().max(),
ax.get_ylim3d().max(),
ax.get_zlim3d().max(), ] )
ax.set_xlim3d([-xyzlim,xyzlim])
ax.set_ylim3d([-xyzlim,xyzlim])
ax.set_zlim3d([-xyzlim,xyzlim])
if disp: show()
return ax
| gpl-3.0 |
roxyboy/bokeh | bokeh/charts/builder/step_builder.py | 43 | 5445 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Step class, which lets you build your Step charts by just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
from six import string_types
from ..utils import cycle_colors
from .._builder import create_and_build, Builder
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer
from ...models.glyphs import Line
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Step(values, index=None, **kws):
""" Create a step chart using :class:`StepBuilder <bokeh.charts.builder.step_builder.StepBuilder>`
render the geometry from values and index.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
index (str|1d iterable, optional): can be used to specify a common custom
            index for all data series as a **1d iterable** of any sort that will be used as
series common index or a **string** that corresponds to the key of the
mapping to be used as index (and not as data series) if
area.values is a mapping (like a dict, an OrderedDict
or a pandas DataFrame)
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
from bokeh.charts import Step, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
step = Step(xyvalues, title="Steps", legend="top_left", ylabel='Languages')
output_file('step.html')
show(step)
"""
return create_and_build(StepBuilder, values, index=index, **kws)
class StepBuilder(Builder):
"""This is the Step class and it is in charge of plotting
Step charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the
source.
"""
index = Any(help="""
An index to be used for all data series as follows:
- A 1d iterable of any sort that will be used as
series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
series) if area.values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
""")
def _process_data(self):
"""It calculates the chart properties accordingly from Step.values.
Then build a dict containing references to all the points to be
used by the segment glyph inside the ``_yield_renderers`` method.
"""
self._data = dict()
self._groups = []
orig_xs = self._values_index
xs = np.empty(2*len(orig_xs)-1, dtype=np.int)
xs[::2] = orig_xs[:]
xs[1::2] = orig_xs[1:]
self._data['x'] = xs
for i, col in enumerate(self._values.keys()):
if isinstance(self.index, string_types) and col == self.index:
continue
# save every new group we find
self._groups.append(col)
orig_ys = np.array([self._values[col][x] for x in orig_xs])
ys = np.empty(2*len(orig_ys)-1)
ys[::2] = orig_ys[:]
ys[1::2] = orig_ys[:-1]
self._data['y_%s' % col] = ys
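            # Worked example (added for clarity, not in the original source):
            # for index [0, 1, 2] and a series [5, 7, 3], the interleaving above
            # gives x = [0, 1, 1, 2, 2] and y = [5, 5, 7, 7, 3], so each y value
            # is held until the next x, which draws the step shape.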
def _set_sources(self):
""" Push the Step data into the ColumnDataSource and calculate
the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d()
#y_sources = [sc.columns("y_%s" % col) for col in self._groups]
self.y_range = DataRange1d()
def _yield_renderers(self):
"""Use the line glyphs to connect the xy points in the Step.
Takes reference points from the data loaded at the ColumnDataSource.
"""
colors = cycle_colors(self._groups, self.palette)
for i, name in enumerate(self._groups):
# draw the step horizontal segment
glyph = Line(x="x", y="y_%s" % name, line_color=colors[i], line_width=2)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
| bsd-3-clause |
brenthuisman/phd_tools | analysis.spot.falloff.py | 1 | 6307 | #!/usr/bin/env python
import numpy as np,plot,auger
#OPT: quickly get sorted rundirs
# e.g. autogen | sort -k1.13 -r
#OPT: fix seed
#np.random.seed(65983247)
#we don't know what the added or reduced noise level is when changing energy windows, so we can't compare performance for ipnl3 and iba1.
typs=['ipnl-auger-tof-1.root','iba-auger-notof-3.root']
#typs+=['ipnl-auger-notof-1.root','iba-auger-tof-3.root','ipnl-auger-tof-3.root','iba-auger-tof-1.root','ipnl-auger-notof-3.root','iba-auger-notof-1.root']
#ipnl FFFF'spot61'
#typs=['ipnlf-auger-tof-1.root','ipnlf-auger-notof-1.root']
def megaplot(ctsets,studyname,emisfops=None,labels=["$10^9$","$10^8$","$10^7$","$10^6$"],axlabel='Primaries [nr]'):
if emisfops is not None:
for emisfop in emisfops:
emisfop[0]+=15.863
emisfop[1]+=15.863
print 'FOP shift all overlaid'
if len(ctsets) == 4:
f, ((ax1,ax2),(ax3,ax4)) = plot.subplots(nrows=2, ncols=2, sharex=False, sharey=False)
auger.plot_all_ranges(ax1,ctsets[0])
auger.plot_all_ranges(ax2,ctsets[1])
auger.plot_all_ranges(ax3,ctsets[2])
auger.plot_all_ranges(ax4,ctsets[3])
if not 'Primaries' in axlabel:
ax1.set_title(labels[0])
ax2.set_title(labels[1])
ax3.set_title(labels[2])
ax4.set_title(labels[3])
f.subplots_adjust(hspace=.5)
ax1.set_xlabel('')
ax2.set_xlabel('')
ax2.set_ylabel('')
ax4.set_ylabel('')
f.savefig(studyname+'-'+typ+'-FOP.pdf', bbox_inches='tight')
plot.close('all')
#############################################################################################
print 'FOP shift distributions'
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax1 = plt.axes(projection='3d')
ax1.view_init(30, -50)
for i,ctset in enumerate(ctsets):
auger.plotfodiffdist(ax1,ctset,i,emisfops,labels,axlabel)
if not emisfops == None:
fopshifts=[]
for fopset in emisfops:
fopshifts.append( fopset[-1]-fopset[0] )
ax1.set_xlim3d(np.mean(fopshifts)-20,np.mean(fopshifts)+20)
if emisfops is not None and len(emisfops) == 1:
ax1.set_title(studyname+', $Shift_{em}$ = '+str(emisfops[0][-1]-emisfops[0][0]), y=1.08)
#plt.tight_layout(rect = [-0.1, 0.0, 1.0, 1.1])#L,B,R,T
fig.savefig(studyname+'-'+typ+'-FOP-shift.pdf')#, bbox_inches='tight')
plt.close('all')
#############################################################################################
print 'FOP distributions'
fig = plt.figure()
ax1 = plt.axes(projection='3d')
ax1.view_init(30, -50)
for i,ctset in enumerate(ctsets):
auger.plotfodist(ax1,ctset,i,emisfops,labels,axlabel)
if emisfops is not None and len(emisfops) == 1:
ax1.set_title(studyname+', $CT_{FOP_{em}}$ = '+str(emisfops[0][0])[:5]+', $RPCT_{FOP_{em}}$ = '+str(emisfops[0][1])[:5], y=1.08)
#plt.legend()#shadow = True,frameon = True,fancybox = True,ncol = 1,fontsize = 'x-small',loc = 'lower right')
#plt.tight_layout(rect = [-0.1, 0.0, 1.0, 1.1])#L,B,R,T
plt.savefig(studyname+'-'+typ+'-FOP-dist.pdf')#, bbox_inches='tight')
plt.close('all')
#############################################################################################
# TODO add pgemissions plots.
for typ in typs:
ctsetsets = []
#ctsetsets.append( auger.getctset(1e9,'run.A3vG','run.9950',typ) )# 7 , 10 # E0OX , KVWm :because E0OX takes long, relaunched as wzVw
#ctsetsets.append( auger.getctset(1e8,'run.lNkm','run.ztcV',typ) )# 0 , 4 # , i7Oz :done
#ctsetsets.append( auger.getctset(1e7,'run.AE5G','run.gK11',typ) )# 2 , 0 # ZbiH , :done
#ctsetsets.append( auger.getctset(1e6,'run.rFN5','run.jUPf',typ) )# 5 , 0 # AGJb , :done
#megaplot(ctsetsets,'spot61',[[6.715,20.58]])
#ctsetsets.append( auger.getctset(1e9,'run.9WZ0','run.xEMN',typ) )#
#ctsetsets.append( auger.getctset(1e8,'run.E3So','run.okUi',typ) )#
#ctsetsets.append( auger.getctset(1e7,'run.aNrj','run.GERe',typ) )# , 1 # , w9gm :done
#ctsetsets.append( auger.getctset(1e6,'run.7OvR','run.ykAn',typ) )#
#megaplot(ctsetsets,'spot29',[[-1.75,0.291]])
#ctsetsets.append( auger.getctset(1e9,'run.AuZu','run.ECUY',typ) )# 2 , 1 # vWKg , wS3i :done
#ctsetsets.append( auger.getctset(1e8,'run.XkoP','run.cLDV',typ) )# , 6 # , kDxT :done
#ctsetsets.append( auger.getctset(1e7,'run.V4ER','run.Hhup',typ) )#
#ctsetsets.append( auger.getctset(1e6,'run.zE43','run.MUoq',typ) )# 7 , # EdQf , :done
#megaplot(ctsetsets,'spot40',[[-10.6,-7.59]])
ctsetsets.append( auger.getctset(1e9,'run.kVk7','run.iias',typ) )
ctsetsets.append( auger.getctset(1e8,'run.cj4U','run.RWsd',typ) )
ctsetsets.append( auger.getctset(1e7,'run.ngMk','run.Ho8b',typ) )
ctsetsets.append( auger.getctset(1e6,'run.WbDj','run.f9cL',typ) )
megaplot(ctsetsets,'waterbox')
#ctsetsets.append( auger.getctset(1e9,'run.1vRP','run.KvuV',typ) )# , 11 # , g4RC :done
#ctsetsets.append( auger.getctset(1e8,'run.SwIj','run.XYSQ',typ) )# 50 , # SwIj , :done
#ctsetsets.append( auger.getctset(1e7,'run.UAJZ','run.ebjM',typ) )
#ctsetsets.append( auger.getctset(1e6,'run.BPnu','run.q7Vn',typ) )
#megaplot(ctsetsets,'waterboxshifted')
#compare 144MeV against shifted 139mev
#ctsetsets.append( auger.getctset(1e9,'run.LswJ','run.KvuV',typ) )
#ctsetsets.append( auger.getctset(1e8,'run.IdLD','run.XYSQ',typ) )
#ctsetsets.append( auger.getctset(1e7,'run.pDRM','run.ebjM',typ) )
#ctsetsets.append( auger.getctset(1e6,'run.pSBH','run.q7Vn',typ) )
#megaplot(ctsetsets,'waterboxfakeshift')
#ctsetsets.append( auger.getctset(27500000,'run.LUBP','run.hRZn',typ) )#spot29#
#ctsetsets.append( auger.getctset(47300000,'run.7WZC','run.3Zs9',typ) )#spot61# 3 , 11 # s3aB, 4gLD+TWZQ :done
#megaplot(ctsetsets,'realspots',[[-1.75,0.291],[6.715,20.58]],['Spot 29','Spot 61'],'Spot Nr')
#elay fopshift 4.3359375, geolay fopshift 6.359375
#ctsetsets.append( auger.getctset(513093255,'run.Gi7J','run.aSej',typ) )#elay # 0,6 # , F3gZ # +UROr 7x (4x)
#ctsetsets.append( auger.getctset(537825202,'run.WfRk','run.F4bu',typ) )#geolay# 0,2 # , bvJG # +XJ8V 3x
#megaplot(ctsetsets,'laygroup',[[0,4.33],[0,6.35]],['Energy Layer','Geometric Layer'],'Grouping Method')
print 'Mean detection yield in',typ,'study over',sum([ctset['totnprim'] for ctset in ctsetsets]),'primaries in',sum([ctset['nreal'] for ctset in ctsetsets]),'realisations:',sum([ctset['detyieldmu'] for ctset in ctsetsets])
| lgpl-3.0 |
diana-hep/carl | tests/distributions/test_normal.py | 1 | 3195 | # Carl is free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
import numpy as np
import scipy.stats as st
from numpy.testing import assert_array_almost_equal
from sklearn.utils import check_random_state
from carl.distributions import Normal
from carl.distributions import MultivariateNormal
def check_normal(mu, sigma):
rng = check_random_state(1)
p_carl = Normal(mu=mu, sigma=sigma)
p_scipy = st.norm(loc=mu, scale=sigma)
X = rng.rand(50, 1)
assert_array_almost_equal(p_carl.pdf(X),
p_scipy.pdf(X.ravel()))
assert_array_almost_equal(p_carl.cdf(X),
p_scipy.cdf(X.ravel()))
assert_array_almost_equal(-np.log(p_carl.pdf(X)),
p_carl.nll(X))
def test_normal():
for mu, sigma in [(0., 1.), (-1., 1.5), (3., 2.)]:
yield check_normal, mu, sigma
def check_rvs(mu, sigma, random_state):
p = Normal(mu=mu, sigma=sigma)
samples = p.rvs(10000, random_state=random_state)
assert np.abs(np.mean(samples) - mu) <= 0.05
assert np.abs(np.std(samples) - sigma) <= 0.05
def test_rvs():
for mu, sigma, random_state in [(0, 1, 0), (1, 1, 1),
(2, 2, 3), (-1, 0.5, 4)]:
yield check_rvs, mu, sigma, random_state
def check_fit(mu, sigma):
p = Normal()
X = st.norm(loc=mu, scale=sigma).rvs(5000, random_state=0).reshape(-1, 1)
s0 = p.score(X)
p.fit(X)
assert np.abs(p.mu.get_value() - mu) <= 0.1
assert np.abs(p.sigma.get_value() - sigma) <= 0.1
assert p.score(X) >= s0
def test_fit():
for mu, sigma in [(0., 1.), (-1., 1.5), (3., 2.)]:
yield check_fit, mu, sigma
def test_fit_with_constraints():
p = Normal()
X = st.norm(loc=0.05, scale=1.0).rvs(5000, random_state=0).reshape(-1, 1)
p.fit(X, constraints=[
{"param": p.mu, "type": "ineq", "fun": lambda mu: mu},
{"param": p.mu, "type": "ineq", "fun": lambda mu: 0.1 - mu},
{"param": p.sigma, "type": "ineq", "fun": lambda sigma: sigma},
{"param": (p.mu, p.sigma), "type": "ineq",
"fun": lambda mu, sigma: mu * sigma}])
assert p.mu.get_value() >= 0.0
assert p.mu.get_value() <= 0.1
assert p.sigma.get_value() >= 0.0
assert p.mu.get_value() * p.sigma.get_value() >= 0.0
def test_fit_with_bounds():
p = Normal()
X = st.norm(loc=0.05, scale=1.0).rvs(5000, random_state=0).reshape(-1, 1)
p.fit(X, bounds=[{"param": p.sigma, "bounds": (0, None)}])
assert p.sigma.get_value() >= 0.0
def check_mv_normal(mu, sigma):
p = MultivariateNormal(mu=mu, sigma=sigma)
X = p.rvs(20000, random_state=0)
assert np.mean(mu - X.mean(axis=0)) < 0.02
assert np.mean(sigma - np.cov(X.T)) < 0.02
assert p.ndim == len(mu)
def test_mv_normal():
for mu, sigma in [(np.array([0.0, 0.0]), np.eye(2)),
(np.array([1.0, -1.0, 0.0]), np.eye(3)),
(np.array([0.5, -0.5]), np.array([[1.0, 0.5],
[0.5, 2.0]]))]:
yield check_mv_normal, mu, sigma
| bsd-3-clause |
MatthieuBizien/scikit-learn | sklearn/metrics/scorer.py | 23 | 13077 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.model_selection.GridSearchCV` or
:func:`sklearn.model_selection.cross_val_score` as the ``scoring``
parameter, to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(metric, pos_label=None,
average=average)
| bsd-3-clause |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/pandas/core/ops/mask_ops.py | 5 | 4935 | """
Ops for masked arrays.
"""
from typing import Optional, Union
import numpy as np
from pandas._libs import lib, missing as libmissing
def kleene_or(
left: Union[bool, np.ndarray],
right: Union[bool, np.ndarray],
left_mask: Optional[np.ndarray],
right_mask: Optional[np.ndarray],
):
"""
Boolean ``or`` using Kleene logic.
Values are NA where we have ``NA | NA`` or ``NA | False``.
``NA | True`` is considered True.
Parameters
----------
left, right : ndarray, NA, or bool
The values of the array.
left_mask, right_mask : ndarray, optional
The masks. Only one of these may be None, which implies that
the associated `left` or `right` value is a scalar.
Returns
-------
result, mask: ndarray[bool]
The result of the logical or, and the new mask.
"""
# To reduce the number of cases, we ensure that `left` & `left_mask`
# always come from an array, not a scalar. This is safe, since because
# A | B == B | A
if left_mask is None:
return kleene_or(right, left, right_mask, left_mask)
assert isinstance(left, np.ndarray)
raise_for_nan(right, method="or")
if right is libmissing.NA:
result = left.copy()
else:
result = left | right
if right_mask is not None:
# output is unknown where (False & NA), (NA & False), (NA & NA)
left_false = ~(left | left_mask)
right_false = ~(right | right_mask)
mask = (
(left_false & right_mask)
| (right_false & left_mask)
| (left_mask & right_mask)
)
else:
if right is True:
mask = np.zeros_like(left_mask)
elif right is libmissing.NA:
mask = (~left & ~left_mask) | left_mask
else:
# False
mask = left_mask.copy()
return result, mask
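# Illustrative sketch (added; not part of the pandas source): a hand-checked
# example of the Kleene "or" semantics above; the values are arbitrary and
# chosen only for demonstration.
def _example_kleene_or():  # illustration only
    left = np.array([True, False, False])
    left_mask = np.array([False, False, True])    # third value is NA
    right = np.array([False, False, True])
    right_mask = np.array([False, True, False])   # second value is NA
    result, mask = kleene_or(left, right, left_mask, right_mask)
    # result -> [True, False, True], mask -> [False, True, False]:
    # True | False = True, False | NA = NA, NA | True = True
    return result, mask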
def kleene_xor(
left: Union[bool, np.ndarray],
right: Union[bool, np.ndarray],
left_mask: Optional[np.ndarray],
right_mask: Optional[np.ndarray],
):
"""
Boolean ``xor`` using Kleene logic.
This is the same as ``or``, with the following adjustments
* True, True -> False
* True, NA -> NA
Parameters
----------
left, right : ndarray, NA, or bool
The values of the array.
left_mask, right_mask : ndarray, optional
The masks. Only one of these may be None, which implies that
the associated `left` or `right` value is a scalar.
Returns
-------
result, mask: ndarray[bool]
The result of the logical xor, and the new mask.
"""
if left_mask is None:
return kleene_xor(right, left, right_mask, left_mask)
raise_for_nan(right, method="xor")
if right is libmissing.NA:
result = np.zeros_like(left)
else:
result = left ^ right
if right_mask is None:
if right is libmissing.NA:
mask = np.ones_like(left_mask)
else:
mask = left_mask.copy()
else:
mask = left_mask | right_mask
return result, mask
def kleene_and(
left: Union[bool, libmissing.NAType, np.ndarray],
right: Union[bool, libmissing.NAType, np.ndarray],
left_mask: Optional[np.ndarray],
right_mask: Optional[np.ndarray],
):
"""
Boolean ``and`` using Kleene logic.
Values are ``NA`` for ``NA & NA`` or ``True & NA``.
Parameters
----------
left, right : ndarray, NA, or bool
The values of the array.
left_mask, right_mask : ndarray, optional
The masks. Only one of these may be None, which implies that
the associated `left` or `right` value is a scalar.
Returns
-------
result, mask: ndarray[bool]
        The result of the logical and, and the new mask.
"""
# To reduce the number of cases, we ensure that `left` & `left_mask`
    # always come from an array, not a scalar. This is safe because
    # A & B == B & A
if left_mask is None:
return kleene_and(right, left, right_mask, left_mask)
assert isinstance(left, np.ndarray)
raise_for_nan(right, method="and")
if right is libmissing.NA:
result = np.zeros_like(left)
else:
result = left & right
if right_mask is None:
# Scalar `right`
if right is libmissing.NA:
mask = (left & ~left_mask) | left_mask
else:
mask = left_mask.copy()
if right is False:
# unmask everything
mask[:] = False
else:
# unmask where either left or right is False
left_false = ~(left | left_mask)
right_false = ~(right | right_mask)
mask = (left_mask & ~right_false) | (right_mask & ~left_false)
return result, mask
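# Illustrative sketch (added; not part of the pandas source): with a scalar NA
# on the right, "True & NA" stays unknown while "False & NA" is False.
def _example_kleene_and():  # illustration only
    left = np.array([True, False])
    left_mask = np.array([False, False])
    result, mask = kleene_and(left, libmissing.NA, left_mask, None)
    # result -> [False, False], mask -> [True, False]
    return result, mask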
def raise_for_nan(value, method):
if lib.is_float(value) and np.isnan(value):
raise ValueError(f"Cannot perform logical '{method}' with floating NaN")
| gpl-2.0 |
msingh172/pylearn2 | pylearn2/train_extensions/live_monitoring.py | 30 | 11536 | """
Training extension that allows querying of monitoring values while an
experiment executes.
"""
__authors__ = "Dustin Webb"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Dustin Webb"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import copy
try:
import zmq
zmq_available = True
except:
zmq_available = False
try:
import matplotlib.pyplot as plt
pyplot_available = True
except:
pyplot_available = False
from functools import wraps
from pylearn2.monitor import Monitor
from pylearn2.train_extensions import TrainExtension
class LiveMonitorMsg(object):
"""
Base class that defines the required interface for all Live Monitor
messages.
"""
response_set = False
def get_response(self):
"""
Method that instantiates a response message for a given request
message. It is not necessary to implement this function on response
messages.
"""
raise NotImplementedError('get_response is not implemented.')
class ChannelListResponse(LiveMonitorMsg):
"""
A message containing the list of channels being monitored.
"""
pass
class ChannelListRequest(LiveMonitorMsg):
"""
A message indicating a request for a list of channels being monitored.
"""
@wraps(LiveMonitorMsg.get_response)
def get_response(self):
return ChannelListResponse()
class ChannelsResponse(LiveMonitorMsg):
"""
A message containing monitoring data related to the channels specified.
Data can be requested for all epochs or select epochs.
Parameters
----------
channel_list : list
A list of the channels for which data has been requested.
start : int
The starting epoch for which data should be returned.
end : int
The epoch after which data should be returned.
step : int
The number of epochs to be skipped between data points.
"""
def __init__(self, channel_list, start, end, step=1):
assert(
isinstance(channel_list, list)
and len(channel_list) > 0
)
self.channel_list = channel_list
assert(start >= 0)
self.start = start
self.end = end
assert(step > 0)
self.step = step
class ChannelsRequest(LiveMonitorMsg):
"""
A message for requesting data related to the channels specified.
Parameters
----------
channel_list : list
A list of the channels for which data has been requested.
start : int
The starting epoch for which data should be returned.
end : int
The epoch after which data should be returned.
step : int
The number of epochs to be skipped between data points.
"""
def __init__(self, channel_list, start=0, end=-1, step=1):
assert(
isinstance(channel_list, list)
and len(channel_list) > 0
)
self.channel_list = channel_list
assert(start >= 0)
self.start = start
self.end = end
assert(step > 0)
self.step = step
@wraps(LiveMonitorMsg.get_response)
def get_response(self):
return ChannelsResponse(
self.channel_list,
self.start,
self.end,
self.step
)
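# Sketch of the intended round trip from a client process (added for clarity;
# the address, port and channel names are assumptions):
#   remote = LiveMonitor(address='127.0.0.1', req_port=5555)
#   names = remote.list_channels().data      # list of monitored channel names
#   remote.update_channels(names[:1])        # pull data for the first channel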
class LiveMonitoring(TrainExtension):
"""
A training extension for remotely monitoring and filtering the channels
being monitored in real time. PyZMQ must be installed for this extension
to work.
Parameters
----------
address : string
        The IP address of the interface on which the monitor should listen.
    req_port : int
        The port number to be used to service requests.
pub_port : int
The port number to be used to publish updates.
"""
def __init__(self, address='*', req_port=5555, pub_port=5556):
if not zmq_available:
raise ImportError('zeromq needs to be installed to '
'use this module.')
self.address = 'tcp://%s' % address
assert(req_port != pub_port)
assert(req_port > 1024 and req_port < 65536)
self.req_port = req_port
assert(pub_port > 1024 and pub_port < 65536)
self.pub_port = pub_port
address_template = self.address + ':%d'
self.context = zmq.Context()
self.req_sock = None
if self.req_port > 0:
self.req_sock = self.context.socket(zmq.REP)
self.req_sock.bind(address_template % self.req_port)
self.pub_sock = None
if self.pub_port > 0:
self.pub_sock = self.context.socket(zmq.PUB)
            self.pub_sock.bind(address_template % self.pub_port)
# Tracks the number of times on_monitor has been called
self.counter = 0
@wraps(TrainExtension.on_monitor)
def on_monitor(self, model, dataset, algorithm):
monitor = Monitor.get_monitor(model)
try:
rsqt_msg = self.req_sock.recv_pyobj(flags=zmq.NOBLOCK)
# Determine what type of message was received
rsp_msg = rsqt_msg.get_response()
if isinstance(rsp_msg, ChannelListResponse):
rsp_msg.data = list(monitor.channels.keys())
if isinstance(rsp_msg, ChannelsResponse):
channel_list = rsp_msg.channel_list
if (
not isinstance(channel_list, list)
or len(channel_list) == 0
):
                    channel_list = []
                    result = TypeError(
                        'ChannelsResponse requires a list of channels.'
                    )
                else:
                    result = {}
for channel_name in channel_list:
if channel_name in monitor.channels.keys():
chan = copy.deepcopy(
monitor.channels[channel_name]
)
end = rsp_msg.end
if end == -1:
end = len(chan.batch_record)
# TODO copying and truncating the records individually
# like this is brittle. Is there a more robust
# solution?
chan.batch_record = chan.batch_record[
rsp_msg.start:end:rsp_msg.step
]
chan.epoch_record = chan.epoch_record[
rsp_msg.start:end:rsp_msg.step
]
chan.example_record = chan.example_record[
rsp_msg.start:end:rsp_msg.step
]
chan.time_record = chan.time_record[
rsp_msg.start:end:rsp_msg.step
]
chan.val_record = chan.val_record[
rsp_msg.start:end:rsp_msg.step
]
result[channel_name] = chan
else:
result[channel_name] = KeyError(
                            'Invalid channel: %s' % channel_name
)
                rsp_msg.data = result
            self.req_sock.send_pyobj(rsp_msg)
except zmq.Again:
pass
self.counter += 1
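# Illustrative sketch (not part of the original module): the extension is
# attached to an experiment like any other TrainExtension.  The exact Train
# arguments below are assumptions and depend on your experiment setup:
#
#   from pylearn2.train import Train
#   train = Train(dataset=dataset, model=model, algorithm=algorithm,
#                 extensions=[LiveMonitoring(req_port=5555, pub_port=5556)])
#   train.main_loop()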
class LiveMonitor(object):
"""
    A utility class for requesting data from a LiveMonitoring training
extension.
Parameters
----------
address : string
The IP address on which a LiveMonitoring process is listening.
req_port : int
The port number on which a LiveMonitoring process is listening.
"""
def __init__(self, address='127.0.0.1', req_port=5555):
"""
"""
if not zmq_available:
raise ImportError('zeromq needs to be installed to '
'use this module.')
self.address = 'tcp://%s' % address
assert(req_port > 0)
self.req_port = req_port
self.context = zmq.Context()
self.req_sock = self.context.socket(zmq.REQ)
self.req_sock.connect(self.address + ':' + str(self.req_port))
self.channels = {}
def list_channels(self):
"""
Returns a list of the channels being monitored.
"""
self.req_sock.send_pyobj(ChannelListRequest())
return self.req_sock.recv_pyobj()
def update_channels(self, channel_list, start=-1, end=-1, step=1):
"""
        Retrieves data for a specified set of channels and combines that data
        with any previously retrieved data.
        This assumes all the channels have the same number of values. It is
        unclear whether this is a reasonable assumption. If they do not
        have the same number of values then it may request too much or too
        little data, leading to duplicated data or holes in the data,
        respectively. This could be made more robust by making a call to
        retrieve all the data for all of the channels.
Parameters
----------
channel_list : list
A list of the channels for which data should be requested.
        start : int
            The starting epoch for which data should be requested.
        end : int
            The epoch up to which data should be requested; -1 requests
            everything through the most recent epoch.
        step : int
The number of epochs to be skipped between data points.
"""
assert((start == -1 and end == -1) or end > start)
if start == -1:
start = 0
if len(self.channels.keys()) > 0:
channel_name = list(self.channels.keys())[0]
start = len(self.channels[channel_name].epoch_record)
self.req_sock.send_pyobj(ChannelsRequest(
channel_list, start=start, end=end, step=step
))
rsp_msg = self.req_sock.recv_pyobj()
if isinstance(rsp_msg.data, Exception):
raise rsp_msg.data
for channel in rsp_msg.data.keys():
rsp_chan = rsp_msg.data[channel]
if isinstance(rsp_chan, Exception):
raise rsp_chan
if channel not in self.channels.keys():
self.channels[channel] = rsp_chan
else:
chan = self.channels[channel]
chan.batch_record += rsp_chan.batch_record
chan.epoch_record += rsp_chan.epoch_record
chan.example_record += rsp_chan.example_record
chan.time_record += rsp_chan.time_record
chan.val_record += rsp_chan.val_record
def follow_channels(self, channel_list):
"""
Tracks and plots a specified set of channels in real time.
Parameters
----------
channel_list : list
A list of the channels for which data has been requested.
"""
if not pyplot_available:
raise ImportError('pyplot needs to be installed for '
'this functionality.')
plt.clf()
plt.ion()
while True:
self.update_channels(channel_list)
plt.clf()
for channel_name in self.channels:
plt.plot(
self.channels[channel_name].epoch_record,
self.channels[channel_name].val_record,
label=channel_name
)
plt.legend()
plt.ion()
plt.draw()
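# Illustrative sketch (not part of the original module): querying a running
# LiveMonitoring extension from another process.  The channel name below is
# hypothetical; call list_channels() first to see what actually exists:
#
#   monitor = LiveMonitor(address='127.0.0.1', req_port=5555)
#   print(monitor.list_channels().data)           # available channel names
#   monitor.update_channels(['train_objective'])  # fetch and merge records
#   monitor.follow_channels(['train_objective'])  # live-updating plot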
| bsd-3-clause |
joequant/zipline | tests/test_perf_tracking.py | 14 | 77177 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import pickle
import collections
from datetime import (
datetime,
timedelta,
)
import logging
import operator
import unittest
from nose_parameterized import parameterized
import nose.tools as nt
import pytz
import itertools
import pandas as pd
import numpy as np
from six.moves import range, zip
from zipline.assets import AssetFinder
import zipline.utils.factory as factory
import zipline.finance.performance as perf
from zipline.finance.slippage import Transaction, create_transaction
import zipline.utils.math_utils as zp_math
from zipline.gens.composites import date_sorted_sources
from zipline.finance.trading import SimulationParameters
from zipline.finance.blotter import Order
from zipline.finance.commission import PerShare, PerTrade, PerDollar
from zipline.finance.trading import with_environment
from zipline.utils.factory import create_random_simulation_parameters
import zipline.protocol as zp
from zipline.protocol import Event, DATASOURCE_TYPE
from zipline.sources.data_frame_source import DataPanelSource
logger = logging.getLogger('Test Perf Tracking')
onesec = timedelta(seconds=1)
oneday = timedelta(days=1)
tradingday = timedelta(hours=6, minutes=30)
# nose.tools changed name in python 3
if not hasattr(nt, 'assert_count_equal'):
nt.assert_count_equal = nt.assert_items_equal
def check_perf_period(pp,
gross_leverage,
net_leverage,
long_exposure,
longs_count,
short_exposure,
shorts_count):
perf_data = pp.to_dict()
np.testing.assert_allclose(
gross_leverage, perf_data['gross_leverage'], rtol=1e-3)
np.testing.assert_allclose(
net_leverage, perf_data['net_leverage'], rtol=1e-3)
np.testing.assert_allclose(
long_exposure, perf_data['long_exposure'], rtol=1e-3)
np.testing.assert_allclose(
longs_count, perf_data['longs_count'], rtol=1e-3)
np.testing.assert_allclose(
short_exposure, perf_data['short_exposure'], rtol=1e-3)
np.testing.assert_allclose(
shorts_count, perf_data['shorts_count'], rtol=1e-3)
def check_account(account,
settled_cash,
equity_with_loan,
total_positions_value,
regt_equity,
available_funds,
excess_liquidity,
cushion,
leverage,
net_leverage,
net_liquidation):
# this is a long only portfolio that is only partially invested
# so net and gross leverage are equal.
np.testing.assert_allclose(settled_cash,
account['settled_cash'], rtol=1e-3)
np.testing.assert_allclose(equity_with_loan,
account['equity_with_loan'], rtol=1e-3)
np.testing.assert_allclose(total_positions_value,
account['total_positions_value'], rtol=1e-3)
np.testing.assert_allclose(regt_equity,
account['regt_equity'], rtol=1e-3)
np.testing.assert_allclose(available_funds,
account['available_funds'], rtol=1e-3)
np.testing.assert_allclose(excess_liquidity,
account['excess_liquidity'], rtol=1e-3)
np.testing.assert_allclose(cushion,
account['cushion'], rtol=1e-3)
np.testing.assert_allclose(leverage, account['leverage'], rtol=1e-3)
np.testing.assert_allclose(net_leverage,
account['net_leverage'], rtol=1e-3)
np.testing.assert_allclose(net_liquidation,
account['net_liquidation'], rtol=1e-3)
def create_txn(trade_event, price, amount):
"""
Create a fake transaction to be filled and processed prior to the execution
of a given trade event.
"""
mock_order = Order(trade_event.dt, trade_event.sid, amount, id=None)
return create_transaction(trade_event, mock_order, price, amount)
@with_environment()
def benchmark_events_in_range(sim_params, env=None):
return [
Event({'dt': dt,
'returns': ret,
'type': zp.DATASOURCE_TYPE.BENCHMARK,
# We explicitly rely on the behavior that benchmarks sort before
# any other events.
'source_id': '1Abenchmarks'})
for dt, ret in env.benchmark_returns.iteritems()
if dt.date() >= sim_params.period_start.date() and
dt.date() <= sim_params.period_end.date()
]
def calculate_results(host,
trade_events,
dividend_events=None,
splits=None,
txns=None):
"""
Run the given events through a stripped down version of the loop in
AlgorithmSimulator.transform.
IMPORTANT NOTE FOR TEST WRITERS/READERS:
This loop has some wonky logic for the order of event processing for
    datasource types. This exists mostly to accommodate existing legacy tests
    that were making assumptions about how events would be sorted.
In particular:
- Dividends passed for a given date are processed PRIOR to any events
for that date.
    - Splits passed for a given date are processed AFTER any events for that
date.
Tests that use this helper should not be considered useful guarantees of
the behavior of AlgorithmSimulator on a stream containing the same events
unless the subgroups have been explicitly re-sorted in this way.
"""
txns = txns or []
splits = splits or []
perf_tracker = perf.PerformanceTracker(host.sim_params)
if dividend_events is not None:
dividend_frame = pd.DataFrame(
[
event.to_series(index=zp.DIVIDEND_FIELDS)
for event in dividend_events
],
)
perf_tracker.update_dividends(dividend_frame)
# Raw trades
trade_events = sorted(trade_events, key=lambda ev: (ev.dt, ev.source_id))
# Add a benchmark event for each date.
trades_plus_bm = date_sorted_sources(trade_events, host.benchmark_events)
# Filter out benchmark events that are later than the last trade date.
filtered_trades_plus_bm = (filt_event for filt_event in trades_plus_bm
if filt_event.dt <= trade_events[-1].dt)
grouped_trades_plus_bm = itertools.groupby(filtered_trades_plus_bm,
lambda x: x.dt)
results = []
bm_updated = False
for date, group in grouped_trades_plus_bm:
for txn in filter(lambda txn: txn.dt == date, txns):
# Process txns for this date.
perf_tracker.process_transaction(txn)
for event in group:
if event.type == zp.DATASOURCE_TYPE.TRADE:
perf_tracker.process_trade(event)
elif event.type == zp.DATASOURCE_TYPE.DIVIDEND:
perf_tracker.process_dividend(event)
elif event.type == zp.DATASOURCE_TYPE.BENCHMARK:
perf_tracker.process_benchmark(event)
bm_updated = True
elif event.type == zp.DATASOURCE_TYPE.COMMISSION:
perf_tracker.process_commission(event)
for split in filter(lambda split: split.dt == date, splits):
# Process splits for this date.
perf_tracker.process_split(split)
if bm_updated:
msg = perf_tracker.handle_market_close_daily()
msg['account'] = perf_tracker.get_account(True)
results.append(msg)
bm_updated = False
return results
def check_perf_tracker_serialization(perf_tracker):
scalar_keys = [
'emission_rate',
'txn_count',
'market_open',
'last_close',
'_dividend_count',
'period_start',
'day_count',
'capital_base',
'market_close',
'saved_dt',
'period_end',
'total_days',
]
p_string = pickle.dumps(perf_tracker)
test = pickle.loads(p_string)
for k in scalar_keys:
nt.assert_equal(getattr(test, k), getattr(perf_tracker, k), k)
for period in test.perf_periods:
nt.assert_true(hasattr(period, '_position_tracker'))
class TestSplitPerformance(unittest.TestCase):
def setUp(self):
self.sim_params, self.dt, self.end_dt = \
create_random_simulation_parameters()
# start with $10,000
self.sim_params.capital_base = 10e3
self.benchmark_events = benchmark_events_in_range(self.sim_params)
def test_split_long_position(self):
events = factory.create_trade_history(
1,
[20, 20],
[100, 100],
oneday,
self.sim_params
)
# set up a long position in sid 1
# 100 shares at $20 apiece = $2000 position
txns = [create_txn(events[0], 20, 100)]
# set up a split with ratio 3 occurring at the start of the second
# day.
splits = [
factory.create_split(
1,
3,
events[1].dt,
),
]
results = calculate_results(self, events, txns=txns, splits=splits)
# should have 33 shares (at $60 apiece) and $20 in cash
self.assertEqual(2, len(results))
latest_positions = results[1]['daily_perf']['positions']
self.assertEqual(1, len(latest_positions))
# check the last position to make sure it's been updated
position = latest_positions[0]
self.assertEqual(1, position['sid'])
self.assertEqual(33, position['amount'])
self.assertEqual(60, position['cost_basis'])
self.assertEqual(60, position['last_sale_price'])
# since we started with $10000, and we spent $2000 on the
# position, but then got $20 back, we should have $8020
# (or close to it) in cash.
# we won't get exactly 8020 because sometimes a split is
# denoted as a ratio like 0.3333, and we lose some digits
# of precision. thus, make sure we're pretty close.
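        # A rough arithmetic sketch of that figure, based only on the
        # assertions above (not on zipline internals): 100 shares / 3 gives
        # 33 whole shares at a $60 cost basis, and the ~0.33 fractional
        # share is cashed out at the post-split price:
        #   10000 - 2000 + (100 / 3.0 - 33) * 60 ~= 8020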
daily_perf = results[1]['daily_perf']
self.assertTrue(
zp_math.tolerant_equals(8020,
daily_perf['ending_cash'], 1))
# Validate that the account attributes were updated.
account = results[1]['account']
self.assertEqual(float('inf'), account['day_trades_remaining'])
# this is a long only portfolio that is only partially invested
# so net and gross leverage are equal.
np.testing.assert_allclose(0.198, account['leverage'], rtol=1e-3)
np.testing.assert_allclose(0.198, account['net_leverage'], rtol=1e-3)
np.testing.assert_allclose(8020, account['regt_equity'], rtol=1e-3)
self.assertEqual(float('inf'), account['regt_margin'])
np.testing.assert_allclose(8020, account['available_funds'], rtol=1e-3)
self.assertEqual(0, account['maintenance_margin_requirement'])
np.testing.assert_allclose(10000,
account['equity_with_loan'], rtol=1e-3)
self.assertEqual(float('inf'), account['buying_power'])
self.assertEqual(0, account['initial_margin_requirement'])
np.testing.assert_allclose(8020, account['excess_liquidity'],
rtol=1e-3)
np.testing.assert_allclose(8020, account['settled_cash'], rtol=1e-3)
np.testing.assert_allclose(10000, account['net_liquidation'],
rtol=1e-3)
np.testing.assert_allclose(0.802, account['cushion'], rtol=1e-3)
np.testing.assert_allclose(1980, account['total_positions_value'],
rtol=1e-3)
self.assertEqual(0, account['accrued_interest'])
for i, result in enumerate(results):
for perf_kind in ('daily_perf', 'cumulative_perf'):
perf_result = result[perf_kind]
# prices aren't changing, so pnl and returns should be 0.0
self.assertEqual(0.0, perf_result['pnl'],
"day %s %s pnl %s instead of 0.0" %
(i, perf_kind, perf_result['pnl']))
self.assertEqual(0.0, perf_result['returns'],
"day %s %s returns %s instead of 0.0" %
(i, perf_kind, perf_result['returns']))
class TestCommissionEvents(unittest.TestCase):
def setUp(self):
self.sim_params, self.dt, self.end_dt = \
create_random_simulation_parameters()
logger.info("sim_params: %s, dt: %s, end_dt: %s" %
(self.sim_params, self.dt, self.end_dt))
self.sim_params.capital_base = 10e3
self.benchmark_events = benchmark_events_in_range(self.sim_params)
def test_commission_event(self):
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
# Test commission models and validate result
# Expected commission amounts:
# PerShare commission: 1.00, 1.00, 1.50 = $3.50
# PerTrade commission: 5.00, 5.00, 5.00 = $15.00
# PerDollar commission: 1.50, 3.00, 4.50 = $9.00
# Total commission = $3.50 + $15.00 + $9.00 = $27.50
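        # Worked breakdown of those totals for the 50/100/150 share fills
        # created below (a sketch assuming min_trade_cost acts as a per-fill
        # floor, which is what the expected values imply):
        #   PerShare(0.01, min 1.00): max(0.50, 1) + max(1.00, 1) + max(1.50, 1) = 3.50
        #   PerTrade(5.00):           5.00 * 3 fills                             = 15.00
        #   PerDollar(0.0015):        0.0015 * $20 * (50 + 100 + 150) shares     = 9.00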
# Create 3 transactions: 50, 100, 150 shares traded @ $20
transactions = [create_txn(events[0], 20, i)
for i in [50, 100, 150]]
# Create commission models and validate that produce expected
# commissions.
models = [PerShare(cost=0.01, min_trade_cost=1.00),
PerTrade(cost=5.00),
PerDollar(cost=0.0015)]
expected_results = [3.50, 15.0, 9.0]
for model, expected in zip(models, expected_results):
total_commission = 0
for trade in transactions:
total_commission += model.calculate(trade)[1]
self.assertEqual(total_commission, expected)
# Verify that commission events are handled correctly by
# PerformanceTracker.
cash_adj_dt = events[0].dt
cash_adjustment = factory.create_commission(1, 300.0, cash_adj_dt)
events.append(cash_adjustment)
# Insert a purchase order.
txns = [create_txn(events[0], 20, 1)]
results = calculate_results(self, events, txns=txns)
# Validate that we lost 320 dollars from our cash pool.
self.assertEqual(results[-1]['cumulative_perf']['ending_cash'],
9680)
# Validate that the cost basis of our position changed.
self.assertEqual(results[-1]['daily_perf']['positions']
[0]['cost_basis'], 320.0)
# Validate that the account attributes were updated.
account = results[1]['account']
self.assertEqual(float('inf'), account['day_trades_remaining'])
np.testing.assert_allclose(0.001, account['leverage'], rtol=1e-3,
atol=1e-4)
np.testing.assert_allclose(9680, account['regt_equity'], rtol=1e-3)
self.assertEqual(float('inf'), account['regt_margin'])
np.testing.assert_allclose(9680, account['available_funds'],
rtol=1e-3)
self.assertEqual(0, account['maintenance_margin_requirement'])
np.testing.assert_allclose(9690,
account['equity_with_loan'], rtol=1e-3)
self.assertEqual(float('inf'), account['buying_power'])
self.assertEqual(0, account['initial_margin_requirement'])
np.testing.assert_allclose(9680, account['excess_liquidity'],
rtol=1e-3)
np.testing.assert_allclose(9680, account['settled_cash'],
rtol=1e-3)
np.testing.assert_allclose(9690, account['net_liquidation'],
rtol=1e-3)
np.testing.assert_allclose(0.999, account['cushion'], rtol=1e-3)
np.testing.assert_allclose(10, account['total_positions_value'],
rtol=1e-3)
self.assertEqual(0, account['accrued_interest'])
def test_commission_zero_position(self):
"""
Ensure no div-by-zero errors.
"""
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
# Buy and sell the same sid so that we have a zero position by the
# time of events[3].
txns = [
create_txn(events[0], 20, 1),
create_txn(events[1], 20, -1),
]
# Add a cash adjustment at the time of event[3].
cash_adj_dt = events[3].dt
cash_adjustment = factory.create_commission(1, 300.0, cash_adj_dt)
events.append(cash_adjustment)
results = calculate_results(self, events, txns=txns)
# Validate that we lost 300 dollars from our cash pool.
self.assertEqual(results[-1]['cumulative_perf']['ending_cash'],
9700)
def test_commission_no_position(self):
"""
Ensure no position-not-found or sid-not-found errors.
"""
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
# Add a cash adjustment at the time of event[3].
cash_adj_dt = events[3].dt
cash_adjustment = factory.create_commission(1, 300.0, cash_adj_dt)
events.append(cash_adjustment)
results = calculate_results(self, events)
# Validate that we lost 300 dollars from our cash pool.
self.assertEqual(results[-1]['cumulative_perf']['ending_cash'],
9700)
class TestDividendPerformance(unittest.TestCase):
def setUp(self):
self.sim_params, self.dt, self.end_dt = \
create_random_simulation_parameters()
self.sim_params.capital_base = 10e3
self.benchmark_events = benchmark_events_in_range(self.sim_params)
def test_market_hours_calculations(self):
# DST in US/Eastern began on Sunday March 14, 2010
before = datetime(2010, 3, 12, 14, 31, tzinfo=pytz.utc)
after = factory.get_next_trading_dt(
before,
timedelta(days=1)
)
self.assertEqual(after.hour, 13)
def test_long_position_receives_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
dividend = factory.create_dividend(
1,
10.00,
# declared date, when the algorithm finds out about
# the dividend
events[0].dt,
# ex_date, the date before which the algorithm must hold stock
# to receive the dividend
events[1].dt,
# pay date, when the algorithm receives the dividend.
events[2].dt
)
# Simulate a transaction being filled prior to the ex_date.
txns = [create_txn(events[0], 10.0, 100)]
results = calculate_results(
self,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.1, 0.1, 0.1])
daily_returns = [event['daily_perf']['returns']
for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.10, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used']
for event in results]
self.assertEqual(cash_flows, [-1000, 0, 1000, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [-1000, -1000, 0, 0, 0])
cash_pos = \
[event['cumulative_perf']['ending_cash'] for event in results]
self.assertEqual(cash_pos, [9000, 9000, 10000, 10000, 10000])
def test_long_position_receives_stock_dividend(self):
# post some trades in the market
events = []
for sid in (1, 2):
events.extend(
factory.create_trade_history(
sid,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params)
)
dividend = factory.create_stock_dividend(
1,
payment_sid=2,
ratio=2,
# declared date, when the algorithm finds out about
# the dividend
declared_date=events[0].dt,
# ex_date, the date before which the algorithm must hold stock
# to receive the dividend
ex_date=events[1].dt,
# pay date, when the algorithm receives the dividend.
pay_date=events[2].dt
)
txns = [create_txn(events[0], 10.0, 100)]
results = calculate_results(
self,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.2, 0.2, 0.2])
daily_returns = [event['daily_perf']['returns']
for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.2, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used']
for event in results]
self.assertEqual(cash_flows, [-1000, 0, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [-1000] * 5)
cash_pos = \
[event['cumulative_perf']['ending_cash'] for event in results]
self.assertEqual(cash_pos, [9000] * 5)
def test_long_position_purchased_on_ex_date_receives_no_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
dividend = factory.create_dividend(
1,
10.00,
events[0].dt, # Declared date
            events[1].dt,  # Ex-dividend date
events[2].dt # Pay date
)
# Simulate a transaction being filled on the ex_date.
txns = [create_txn(events[1], 10.0, 100)]
results = calculate_results(
self,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0, 0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, -1000, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows,
[0, -1000, -1000, -1000, -1000])
def test_selling_before_dividend_payment_still_gets_paid(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
dividend = factory.create_dividend(
1,
10.00,
events[0].dt, # Declared date
            events[1].dt,  # Ex-dividend date
events[3].dt # Pay date
)
buy_txn = create_txn(events[0], 10.0, 100)
sell_txn = create_txn(events[2], 10.0, -100)
txns = [buy_txn, sell_txn]
results = calculate_results(
self,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0.1, 0.1])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0.1, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [-1000, 0, 1000, 1000, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [-1000, -1000, 0, 1000, 1000])
def test_buy_and_sell_before_ex(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10, 10],
[100, 100, 100, 100, 100, 100],
oneday,
self.sim_params
)
dividend = factory.create_dividend(
1,
10.00,
events[3].dt,
events[4].dt,
events[5].dt
)
buy_txn = create_txn(events[1], 10.0, 100)
sell_txn = create_txn(events[2], 10.0, -100)
txns = [buy_txn, sell_txn]
results = calculate_results(
self,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 6)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0, 0, 0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0, 0, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, -1000, 1000, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [0, -1000, 0, 0, 0, 0])
def test_ending_before_pay_date(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
pay_date = self.sim_params.first_open
# find pay date that is much later.
for i in range(30):
pay_date = factory.get_next_trading_dt(pay_date, oneday)
dividend = factory.create_dividend(
1,
10.00,
events[0].dt,
events[0].dt,
pay_date
)
txns = [create_txn(events[1], 10.0, 100)]
results = calculate_results(
self,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0.0, 0.0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, -1000, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(
cumulative_cash_flows,
[0, -1000, -1000, -1000, -1000]
)
def test_short_position_pays_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
dividend = factory.create_dividend(
1,
10.00,
# declare at open of test
events[0].dt,
# ex_date same as trade 2
events[2].dt,
events[3].dt
)
txns = [create_txn(events[1], 10.0, -100)]
results = calculate_results(
self,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, -0.1, -0.1])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.0, -0.1, 0.0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, 1000, 0, -1000, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [0, 1000, 1000, 0, 0])
def test_no_position_receives_no_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
dividend = factory.create_dividend(
1,
10.00,
events[0].dt,
events[1].dt,
events[2].dt
)
results = calculate_results(
self,
events,
dividend_events=[dividend],
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, 0.0, 0.0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.0, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, 0, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [0, 0, 0, 0, 0])
@with_environment()
def test_no_dividend_at_simulation_end(self, env=None):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params
)
dividend = factory.create_dividend(
1,
10.00,
# declared date, when the algorithm finds out about
# the dividend
events[-3].dt,
# ex_date, the date before which the algorithm must hold stock
# to receive the dividend
events[-2].dt,
# pay date, when the algorithm receives the dividend.
# This pays out on the day after the last event
env.next_trading_day(events[-1].dt)
)
# Set the last day to be the last event
self.sim_params.period_end = events[-1].dt
self.sim_params._update_internal()
# Simulate a transaction being filled prior to the ex_date.
txns = [create_txn(events[0], 10.0, 100)]
results = calculate_results(
self,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, 0.0, 0.0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.0, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [-1000, 0, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows,
[-1000, -1000, -1000, -1000, -1000])
class TestDividendPerformanceHolidayStyle(TestDividendPerformance):
    # The holiday tests begin the simulation on the day
# before Thanksgiving, so that the next trading day is
# two days ahead. Any tests that hard code events
# to be start + oneday will fail, since those events will
# be skipped by the simulation.
def setUp(self):
self.dt = datetime(2003, 11, 30, tzinfo=pytz.utc)
self.end_dt = datetime(2004, 11, 25, tzinfo=pytz.utc)
self.sim_params = SimulationParameters(
self.dt,
self.end_dt)
self.benchmark_events = benchmark_events_in_range(self.sim_params)
class TestPositionPerformance(unittest.TestCase):
def setUp(self):
self.sim_params, self.dt, self.end_dt = \
create_random_simulation_parameters()
self.benchmark_events = benchmark_events_in_range(self.sim_params)
def test_long_short_positions(self):
"""
start with $1000
buy 100 stock1 shares at $10
sell short 100 stock2 shares at $10
stock1 then goes down to $9
stock2 goes to $11
"""
trades_1 = factory.create_trade_history(
1,
[10, 10, 10, 9],
[100, 100, 100, 100],
onesec,
self.sim_params
)
trades_2 = factory.create_trade_history(
2,
[10, 10, 10, 11],
[100, 100, 100, 100],
onesec,
self.sim_params
)
txn1 = create_txn(trades_1[1], 10.0, 100)
txn2 = create_txn(trades_2[1], 10.0, -100)
pt = perf.PositionTracker()
pp = perf.PerformancePeriod(1000.0)
pp.position_tracker = pt
pt.execute_transaction(txn1)
pp.handle_execution(txn1)
pt.execute_transaction(txn2)
pp.handle_execution(txn2)
for trade in itertools.chain(trades_1[:-2], trades_2[:-2]):
pt.update_last_sale(trade)
pp.calculate_performance()
check_perf_period(
pp,
gross_leverage=2.0,
net_leverage=0.0,
long_exposure=1000.0,
longs_count=1,
short_exposure=-1000.0,
shorts_count=1)
# Validate that the account attributes were updated.
account = pp.as_account()
check_account(account,
settled_cash=1000.0,
equity_with_loan=1000.0,
total_positions_value=0.0,
regt_equity=1000.0,
available_funds=1000.0,
excess_liquidity=1000.0,
cushion=1.0,
leverage=2.0,
net_leverage=0.0,
net_liquidation=1000.0)
# now simulate stock1 going to $9
pt.update_last_sale(trades_1[-1])
# and stock2 going to $11
pt.update_last_sale(trades_2[-1])
pp.calculate_performance()
# Validate that the account attributes were updated.
account = pp.as_account()
check_perf_period(
pp,
gross_leverage=2.5,
net_leverage=-0.25,
long_exposure=900.0,
longs_count=1,
short_exposure=-1100.0,
shorts_count=1)
check_account(account,
settled_cash=1000.0,
equity_with_loan=800.0,
total_positions_value=-200.0,
regt_equity=1000.0,
available_funds=1000.0,
excess_liquidity=1000.0,
cushion=1.25,
leverage=2.5,
net_leverage=-0.25,
net_liquidation=800.0)
def test_levered_long_position(self):
"""
start with $1,000, then buy 1000 shares at $10.
price goes to $11
"""
# post some trades in the market
trades = factory.create_trade_history(
1,
[10, 10, 10, 11],
[100, 100, 100, 100],
onesec,
self.sim_params
)
txn = create_txn(trades[1], 10.0, 1000)
pt = perf.PositionTracker()
pp = perf.PerformancePeriod(1000.0)
pp.position_tracker = pt
pt.execute_transaction(txn)
pp.handle_execution(txn)
for trade in trades[:-2]:
pt.update_last_sale(trade)
pp.calculate_performance()
check_perf_period(
pp,
gross_leverage=10.0,
net_leverage=10.0,
long_exposure=10000.0,
longs_count=1,
short_exposure=0.0,
shorts_count=0)
# Validate that the account attributes were updated.
account = pp.as_account()
check_account(account,
settled_cash=-9000.0,
equity_with_loan=1000.0,
total_positions_value=10000.0,
regt_equity=-9000.0,
available_funds=-9000.0,
excess_liquidity=-9000.0,
cushion=-9.0,
leverage=10.0,
net_leverage=10.0,
net_liquidation=1000.0)
# now simulate a price jump to $11
pt.update_last_sale(trades[-1])
pp.calculate_performance()
check_perf_period(
pp,
gross_leverage=5.5,
net_leverage=5.5,
long_exposure=11000.0,
longs_count=1,
short_exposure=0.0,
shorts_count=0)
# Validate that the account attributes were updated.
account = pp.as_account()
check_account(account,
settled_cash=-9000.0,
equity_with_loan=2000.0,
total_positions_value=11000.0,
regt_equity=-9000.0,
available_funds=-9000.0,
excess_liquidity=-9000.0,
cushion=-4.5,
leverage=5.5,
net_leverage=5.5,
net_liquidation=2000.0)
def test_long_position(self):
"""
verify that the performance period calculates properly for a
single buy transaction
"""
# post some trades in the market
trades = factory.create_trade_history(
1,
[10, 10, 10, 11],
[100, 100, 100, 100],
onesec,
self.sim_params
)
txn = create_txn(trades[1], 10.0, 100)
pt = perf.PositionTracker()
pp = perf.PerformancePeriod(1000.0)
pp.position_tracker = pt
pt.execute_transaction(txn)
pp.handle_execution(txn)
# This verifies that the last sale price is being correctly
# set in the positions. If this is not the case then returns can
# incorrectly show as sharply dipping if a transaction arrives
# before a trade. This is caused by returns being based on holding
# stocks with a last sale price of 0.
self.assertEqual(pp.positions[1].last_sale_price, 10.0)
for trade in trades:
pt.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(
pp.period_cash_flow,
-1 * txn.price * txn.amount,
"capital used should be equal to the opposite of the transaction \
cost of sole txn in test"
)
self.assertEqual(
len(pp.positions),
1,
"should be just one position")
self.assertEqual(
pp.positions[1].sid,
txn.sid,
"position should be in security with id 1")
self.assertEqual(
pp.positions[1].amount,
txn.amount,
"should have a position of {sharecount} shares".format(
sharecount=txn.amount
)
)
self.assertEqual(
pp.positions[1].cost_basis,
txn.price,
"should have a cost basis of 10"
)
self.assertEqual(
pp.positions[1].last_sale_price,
trades[-1]['price'],
"last sale should be same as last trade. \
expected {exp} actual {act}".format(
exp=trades[-1]['price'],
act=pp.positions[1].last_sale_price)
)
self.assertEqual(
pp.ending_value,
1100,
"ending value should be price of last trade times number of \
shares in position"
)
self.assertEqual(pp.pnl, 100, "gain of 1 on 100 shares should be 100")
check_perf_period(
pp,
gross_leverage=1.0,
net_leverage=1.0,
long_exposure=1100.0,
longs_count=1,
short_exposure=0.0,
shorts_count=0)
# Validate that the account attributes were updated.
account = pp.as_account()
check_account(account,
settled_cash=0.0,
equity_with_loan=1100.0,
total_positions_value=1100.0,
regt_equity=0.0,
available_funds=0.0,
excess_liquidity=0.0,
cushion=0.0,
leverage=1.0,
net_leverage=1.0,
net_liquidation=1100.0)
def test_short_position(self):
"""verify that the performance period calculates properly for a \
single short-sale transaction"""
trades = factory.create_trade_history(
1,
[10, 10, 10, 11, 10, 9],
[100, 100, 100, 100, 100, 100],
onesec,
self.sim_params
)
trades_1 = trades[:-2]
txn = create_txn(trades[1], 10.0, -100)
pt = perf.PositionTracker()
pp = perf.PerformancePeriod(1000.0)
pp.position_tracker = pt
pt.execute_transaction(txn)
pp.handle_execution(txn)
for trade in trades_1:
pt.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(
pp.period_cash_flow,
-1 * txn.price * txn.amount,
"capital used should be equal to the opposite of the transaction\
cost of sole txn in test"
)
self.assertEqual(
len(pp.positions),
1,
"should be just one position")
self.assertEqual(
pp.positions[1].sid,
txn.sid,
"position should be in security from the transaction"
)
self.assertEqual(
pp.positions[1].amount,
-100,
"should have a position of -100 shares"
)
self.assertEqual(
pp.positions[1].cost_basis,
txn.price,
"should have a cost basis of 10"
)
self.assertEqual(
pp.positions[1].last_sale_price,
trades_1[-1]['price'],
"last sale should be price of last trade"
)
self.assertEqual(
pp.ending_value,
-1100,
"ending value should be price of last trade times number of \
shares in position"
)
        self.assertEqual(pp.pnl, -100,
                         "rise of 1 on a -100 share position should be -100")
# simulate additional trades, and ensure that the position value
# reflects the new price
trades_2 = trades[-2:]
# simulate a rollover to a new period
pp.rollover()
for trade in trades_2:
pt.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(
pp.period_cash_flow,
0,
"capital used should be zero, there were no transactions in \
performance period"
)
self.assertEqual(
len(pp.positions),
1,
"should be just one position"
)
self.assertEqual(
pp.positions[1].sid,
txn.sid,
"position should be in security from the transaction"
)
self.assertEqual(
pp.positions[1].amount,
-100,
"should have a position of -100 shares"
)
self.assertEqual(
pp.positions[1].cost_basis,
txn.price,
"should have a cost basis of 10"
)
self.assertEqual(
pp.positions[1].last_sale_price,
trades_2[-1].price,
"last sale should be price of last trade"
)
self.assertEqual(
pp.ending_value,
-900,
"ending value should be price of last trade times number of \
shares in position")
self.assertEqual(
pp.pnl,
200,
"drop of 2 on -100 shares should be 200"
)
# now run a performance period encompassing the entire trade sample.
ptTotal = perf.PositionTracker()
ppTotal = perf.PerformancePeriod(1000.0)
        ppTotal.position_tracker = ptTotal
for trade in trades_1:
ptTotal.update_last_sale(trade)
ptTotal.execute_transaction(txn)
ppTotal.handle_execution(txn)
for trade in trades_2:
ptTotal.update_last_sale(trade)
ppTotal.calculate_performance()
self.assertEqual(
ppTotal.period_cash_flow,
-1 * txn.price * txn.amount,
"capital used should be equal to the opposite of the transaction \
cost of sole txn in test"
)
self.assertEqual(
len(ppTotal.positions),
1,
"should be just one position"
)
self.assertEqual(
ppTotal.positions[1].sid,
txn.sid,
"position should be in security from the transaction"
)
self.assertEqual(
ppTotal.positions[1].amount,
-100,
"should have a position of -100 shares"
)
self.assertEqual(
ppTotal.positions[1].cost_basis,
txn.price,
"should have a cost basis of 10"
)
self.assertEqual(
ppTotal.positions[1].last_sale_price,
trades_2[-1].price,
"last sale should be price of last trade"
)
self.assertEqual(
ppTotal.ending_value,
-900,
"ending value should be price of last trade times number of \
shares in position")
self.assertEqual(
ppTotal.pnl,
100,
"drop of 1 on -100 shares should be 100"
)
check_perf_period(
pp,
gross_leverage=0.8181,
net_leverage=-0.8181,
long_exposure=0.0,
longs_count=0,
short_exposure=-900.0,
shorts_count=1)
# Validate that the account attributes.
account = ppTotal.as_account()
check_account(account,
settled_cash=2000.0,
equity_with_loan=1100.0,
total_positions_value=-900.0,
regt_equity=2000.0,
available_funds=2000.0,
excess_liquidity=2000.0,
cushion=1.8181,
leverage=0.8181,
net_leverage=-0.8181,
net_liquidation=1100.0)
def test_covering_short(self):
"""verify performance where short is bought and covered, and shares \
trade after cover"""
trades = factory.create_trade_history(
1,
[10, 10, 10, 11, 9, 8, 7, 8, 9, 10],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
onesec,
self.sim_params
)
short_txn = create_txn(
trades[1],
10.0,
-100,
)
cover_txn = create_txn(trades[6], 7.0, 100)
pt = perf.PositionTracker()
pp = perf.PerformancePeriod(1000.0)
pp.position_tracker = pt
pt.execute_transaction(short_txn)
pp.handle_execution(short_txn)
pt.execute_transaction(cover_txn)
pp.handle_execution(cover_txn)
for trade in trades:
pt.update_last_sale(trade)
pp.calculate_performance()
short_txn_cost = short_txn.price * short_txn.amount
cover_txn_cost = cover_txn.price * cover_txn.amount
self.assertEqual(
pp.period_cash_flow,
-1 * short_txn_cost - cover_txn_cost,
"capital used should be equal to the net transaction costs"
)
self.assertEqual(
len(pp.positions),
1,
"should be just one position"
)
self.assertEqual(
pp.positions[1].sid,
short_txn.sid,
"position should be in security from the transaction"
)
self.assertEqual(
pp.positions[1].amount,
0,
"should have a position of -100 shares"
)
self.assertEqual(
pp.positions[1].cost_basis,
0,
"a covered position should have a cost basis of 0"
)
self.assertEqual(
pp.positions[1].last_sale_price,
trades[-1].price,
"last sale should be price of last trade"
)
self.assertEqual(
pp.ending_value,
0,
"ending value should be price of last trade times number of \
shares in position"
)
self.assertEqual(
pp.pnl,
300,
"gain of 1 on 100 shares should be 300"
)
check_perf_period(
pp,
gross_leverage=0.0,
net_leverage=0.0,
long_exposure=0.0,
longs_count=0,
short_exposure=0.0,
shorts_count=0)
account = pp.as_account()
check_account(account,
settled_cash=1300.0,
equity_with_loan=1300.0,
total_positions_value=0.0,
regt_equity=1300.0,
available_funds=1300.0,
excess_liquidity=1300.0,
cushion=1.0,
leverage=0.0,
net_leverage=0.0,
net_liquidation=1300.0)
def test_cost_basis_calc(self):
history_args = (
1,
[10, 11, 11, 12],
[100, 100, 100, 100],
onesec,
self.sim_params
)
trades = factory.create_trade_history(*history_args)
transactions = factory.create_txn_history(*history_args)
pt = perf.PositionTracker()
pp = perf.PerformancePeriod(1000.0)
pp.position_tracker = pt
average_cost = 0
for i, txn in enumerate(transactions):
pt.execute_transaction(txn)
pp.handle_execution(txn)
average_cost = (average_cost * i + txn.price) / (i + 1)
self.assertEqual(pp.positions[1].cost_basis, average_cost)
for trade in trades:
pt.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(
pp.positions[1].last_sale_price,
trades[-1].price,
"should have a last sale of 12, got {val}".format(
val=pp.positions[1].last_sale_price)
)
self.assertEqual(
pp.positions[1].cost_basis,
11,
"should have a cost basis of 11"
)
self.assertEqual(
pp.pnl,
400
)
down_tick = factory.create_trade(
1,
10.0,
100,
trades[-1].dt + onesec)
sale_txn = create_txn(
down_tick,
10.0,
-100)
pp.rollover()
pt.execute_transaction(sale_txn)
pp.handle_execution(sale_txn)
pt.update_last_sale(down_tick)
pp.calculate_performance()
self.assertEqual(
pp.positions[1].last_sale_price,
10,
"should have a last sale of 10, was {val}".format(
val=pp.positions[1].last_sale_price)
)
self.assertEqual(
pp.positions[1].cost_basis,
11,
"should have a cost basis of 11"
)
self.assertEqual(pp.pnl, -800, "this period goes from +400 to -400")
pt3 = perf.PositionTracker()
pp3 = perf.PerformancePeriod(1000.0)
pp3.position_tracker = pt3
average_cost = 0
for i, txn in enumerate(transactions):
pt3.execute_transaction(txn)
pp3.handle_execution(txn)
average_cost = (average_cost * i + txn.price) / (i + 1)
self.assertEqual(pp3.positions[1].cost_basis, average_cost)
pt3.execute_transaction(sale_txn)
pp3.handle_execution(sale_txn)
trades.append(down_tick)
for trade in trades:
pt3.update_last_sale(trade)
pp3.calculate_performance()
self.assertEqual(
pp3.positions[1].last_sale_price,
10,
"should have a last sale of 10"
)
self.assertEqual(
pp3.positions[1].cost_basis,
11,
"should have a cost basis of 11"
)
self.assertEqual(
pp3.pnl,
-400,
"should be -400 for all trades and transactions in period"
)
def test_cost_basis_calc_close_pos(self):
history_args = (
1,
[10, 9, 11, 8, 9, 12, 13, 14],
[200, -100, -100, 100, -300, 100, 500, 400],
onesec,
self.sim_params
)
cost_bases = [10, 10, 0, 8, 9, 9, 13, 13.5]
trades = factory.create_trade_history(*history_args)
transactions = factory.create_txn_history(*history_args)
pt = perf.PositionTracker()
pp = perf.PerformancePeriod(1000.0)
pp.position_tracker = pt
for txn, cb in zip(transactions, cost_bases):
pt.execute_transaction(txn)
pp.handle_execution(txn)
self.assertEqual(pp.positions[1].cost_basis, cb)
for trade in trades:
pt.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(pp.positions[1].cost_basis, cost_bases[-1])
class TestPerformanceTracker(unittest.TestCase):
NumDaysToDelete = collections.namedtuple(
'NumDaysToDelete', ('start', 'middle', 'end'))
@parameterized.expand([
("Don't delete any events",
NumDaysToDelete(start=0, middle=0, end=0)),
("Delete first day of events",
NumDaysToDelete(start=1, middle=0, end=0)),
("Delete first two days of events",
NumDaysToDelete(start=2, middle=0, end=0)),
("Delete one day of events from the middle",
NumDaysToDelete(start=0, middle=1, end=0)),
("Delete two events from the middle",
NumDaysToDelete(start=0, middle=2, end=0)),
("Delete last day of events",
NumDaysToDelete(start=0, middle=0, end=1)),
("Delete last two days of events",
NumDaysToDelete(start=0, middle=0, end=2)),
("Delete all but one event.",
NumDaysToDelete(start=2, middle=1, end=2)),
])
def test_tracker(self, parameter_comment, days_to_delete):
"""
@days_to_delete - configures which days in the data set we should
remove, used for ensuring that we still return performance messages
even when there is no data.
"""
# This date range covers Columbus day,
# however Columbus day is not a market holiday
#
# October 2008
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
start_dt = datetime(year=2008,
month=10,
day=9,
tzinfo=pytz.utc)
end_dt = datetime(year=2008,
month=10,
day=16,
tzinfo=pytz.utc)
trade_count = 6
sid = 133
price = 10.1
price_list = [price] * trade_count
volume = [100] * trade_count
trade_time_increment = timedelta(days=1)
sim_params = SimulationParameters(
period_start=start_dt,
period_end=end_dt
)
benchmark_events = benchmark_events_in_range(sim_params)
trade_history = factory.create_trade_history(
sid,
price_list,
volume,
trade_time_increment,
sim_params,
source_id="factory1"
)
sid2 = 134
price2 = 12.12
price2_list = [price2] * trade_count
trade_history2 = factory.create_trade_history(
sid2,
price2_list,
volume,
trade_time_increment,
sim_params,
source_id="factory2"
)
# 'middle' start of 3 depends on number of days == 7
middle = 3
# First delete from middle
if days_to_delete.middle:
del trade_history[middle:(middle + days_to_delete.middle)]
del trade_history2[middle:(middle + days_to_delete.middle)]
# Delete start
if days_to_delete.start:
del trade_history[:days_to_delete.start]
del trade_history2[:days_to_delete.start]
# Delete from end
if days_to_delete.end:
del trade_history[-days_to_delete.end:]
del trade_history2[-days_to_delete.end:]
sim_params.first_open = \
sim_params.calculate_first_open()
sim_params.last_close = \
sim_params.calculate_last_close()
sim_params.capital_base = 1000.0
sim_params.frame_index = [
'sid',
'volume',
'dt',
'price',
'changed']
perf_tracker = perf.PerformanceTracker(
sim_params
)
events = date_sorted_sources(trade_history, trade_history2)
events = [event for event in
self.trades_with_txns(events, trade_history[0].dt)]
# Extract events with transactions to use for verification.
txns = [event for event in
events if event.type == zp.DATASOURCE_TYPE.TRANSACTION]
orders = [event for event in
events if event.type == zp.DATASOURCE_TYPE.ORDER]
all_events = date_sorted_sources(events, benchmark_events)
filtered_events = [filt_event for filt_event
in all_events if filt_event.dt <= end_dt]
filtered_events.sort(key=lambda x: x.dt)
grouped_events = itertools.groupby(filtered_events, lambda x: x.dt)
perf_messages = []
for date, group in grouped_events:
for event in group:
if event.type == zp.DATASOURCE_TYPE.TRADE:
perf_tracker.process_trade(event)
elif event.type == zp.DATASOURCE_TYPE.ORDER:
perf_tracker.process_order(event)
elif event.type == zp.DATASOURCE_TYPE.BENCHMARK:
perf_tracker.process_benchmark(event)
elif event.type == zp.DATASOURCE_TYPE.TRANSACTION:
perf_tracker.process_transaction(event)
msg = perf_tracker.handle_market_close_daily()
perf_messages.append(msg)
self.assertEqual(perf_tracker.txn_count, len(txns))
self.assertEqual(perf_tracker.txn_count, len(orders))
positions = perf_tracker.cumulative_performance.positions
if len(txns) == 0:
self.assertNotIn(sid, positions)
else:
expected_size = len(txns) / 2 * -25
cumulative_pos = positions[sid]
self.assertEqual(cumulative_pos.amount, expected_size)
self.assertEqual(len(perf_messages),
sim_params.days_in_period)
check_perf_tracker_serialization(perf_tracker)
def trades_with_txns(self, events, no_txn_dt):
for event in events:
# create a transaction for all but
# first trade in each sid, to simulate None transaction
if event.dt != no_txn_dt:
order = Order(
sid=event.sid,
amount=-25,
dt=event.dt
)
order.source_id = 'MockOrderSource'
yield order
yield event
txn = Transaction(
sid=event.sid,
amount=-25,
dt=event.dt,
price=10.0,
commission=0.50,
order_id=order.id
)
txn.source_id = 'MockTransactionSource'
yield txn
else:
yield event
@with_environment()
def test_minute_tracker(self, env=None):
""" Tests minute performance tracking."""
start_dt = env.exchange_dt_in_utc(datetime(2013, 3, 1, 9, 31))
end_dt = env.exchange_dt_in_utc(datetime(2013, 3, 1, 16, 0))
sim_params = SimulationParameters(
period_start=start_dt,
period_end=end_dt,
emission_rate='minute'
)
tracker = perf.PerformanceTracker(sim_params)
foosid = 1
barsid = 2
env.update_asset_finder(identifiers=[foosid, barsid])
foo_event_1 = factory.create_trade(foosid, 10.0, 20, start_dt)
order_event_1 = Order(sid=foo_event_1.sid,
amount=-25,
dt=foo_event_1.dt)
bar_event_1 = factory.create_trade(barsid, 100.0, 200, start_dt)
txn_event_1 = Transaction(sid=foo_event_1.sid,
amount=-25,
dt=foo_event_1.dt,
price=10.0,
commission=0.50,
order_id=order_event_1.id)
benchmark_event_1 = Event({
'dt': start_dt,
'returns': 0.01,
'type': zp.DATASOURCE_TYPE.BENCHMARK
})
foo_event_2 = factory.create_trade(
foosid, 11.0, 20, start_dt + timedelta(minutes=1))
bar_event_2 = factory.create_trade(
barsid, 11.0, 20, start_dt + timedelta(minutes=1))
benchmark_event_2 = Event({
'dt': start_dt + timedelta(minutes=1),
'returns': 0.02,
'type': zp.DATASOURCE_TYPE.BENCHMARK
})
events = [
foo_event_1,
order_event_1,
benchmark_event_1,
txn_event_1,
bar_event_1,
foo_event_2,
benchmark_event_2,
bar_event_2,
]
grouped_events = itertools.groupby(
events, operator.attrgetter('dt'))
messages = {}
for date, group in grouped_events:
tracker.set_date(date)
for event in group:
if event.type == zp.DATASOURCE_TYPE.TRADE:
tracker.process_trade(event)
elif event.type == zp.DATASOURCE_TYPE.BENCHMARK:
tracker.process_benchmark(event)
elif event.type == zp.DATASOURCE_TYPE.ORDER:
tracker.process_order(event)
elif event.type == zp.DATASOURCE_TYPE.TRANSACTION:
tracker.process_transaction(event)
msg, _ = tracker.handle_minute_close(date)
messages[date] = msg
self.assertEquals(2, len(messages))
msg_1 = messages[foo_event_1.dt]
msg_2 = messages[foo_event_2.dt]
self.assertEquals(1, len(msg_1['minute_perf']['transactions']),
"The first message should contain one "
"transaction.")
# Check that transactions aren't emitted for previous events.
self.assertEquals(0, len(msg_2['minute_perf']['transactions']),
"The second message should have no "
"transactions.")
self.assertEquals(1, len(msg_1['minute_perf']['orders']),
"The first message should contain one orders.")
# Check that orders aren't emitted for previous events.
self.assertEquals(0, len(msg_2['minute_perf']['orders']),
"The second message should have no orders.")
# Ensure that period_close moves through time.
# Also, ensure that the period_closes are the expected dts.
self.assertEquals(foo_event_1.dt,
msg_1['minute_perf']['period_close'])
self.assertEquals(foo_event_2.dt,
msg_2['minute_perf']['period_close'])
# In this test event1 transactions arrive on the first bar.
# This leads to no returns as the price is constant.
# Sharpe ratio cannot be computed and is None.
# In the second bar we can start establishing a sharpe ratio.
self.assertIsNone(msg_1['cumulative_risk_metrics']['sharpe'])
self.assertIsNotNone(msg_2['cumulative_risk_metrics']['sharpe'])
check_perf_tracker_serialization(tracker)
@with_environment()
def test_close_position_event(self, env=None):
env.update_asset_finder(identifiers=[1, 2])
pt = perf.PositionTracker()
dt = pd.Timestamp("1984/03/06 3:00PM")
pos1 = perf.Position(1, amount=np.float64(120.0),
last_sale_date=dt, last_sale_price=3.4)
pos2 = perf.Position(2, amount=np.float64(-100.0),
last_sale_date=dt, last_sale_price=3.4)
pt.update_positions({1: pos1, 2: pos2})
event_type = DATASOURCE_TYPE.CLOSE_POSITION
index = [dt + timedelta(days=1)]
pan = pd.Panel({1: pd.DataFrame({'price': 1, 'volume': 0,
'type': event_type}, index=index),
2: pd.DataFrame({'price': 1, 'volume': 0,
'type': event_type}, index=index),
3: pd.DataFrame({'price': 1, 'volume': 0,
'type': event_type}, index=index)})
source = DataPanelSource(pan)
for i, event in enumerate(source):
txn = pt.maybe_create_close_position_transaction(event)
if event.sid == 1:
# Test owned long
self.assertEqual(-120, txn.amount)
elif event.sid == 2:
# Test owned short
self.assertEqual(100, txn.amount)
elif event.sid == 3:
# Test not-owned SID
self.assertIsNone(txn)
def test_handle_sid_removed_from_universe(self):
# post some trades in the market
sim_params, _, _ = create_random_simulation_parameters()
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
sim_params
)
# Create a tracker and a dividend
perf_tracker = perf.PerformanceTracker(sim_params)
dividend = factory.create_dividend(
1,
10.00,
# declared date, when the algorithm finds out about
# the dividend
events[0].dt,
# ex_date, the date before which the algorithm must hold stock
# to receive the dividend
events[1].dt,
# pay date, when the algorithm receives the dividend.
events[2].dt
)
dividend_frame = pd.DataFrame(
[dividend.to_series(index=zp.DIVIDEND_FIELDS)],
)
perf_tracker.update_dividends(dividend_frame)
# Ensure that the dividend is in the tracker
self.assertIn(1, perf_tracker.dividend_frame['sid'].values)
# Inform the tracker that sid 1 has been removed from the universe
perf_tracker.handle_sid_removed_from_universe(1)
# Ensure that the dividend for sid 1 has been removed from dividend
# frame
self.assertNotIn(1, perf_tracker.dividend_frame['sid'].values)
def test_serialization(self):
start_dt = datetime(year=2008,
month=10,
day=9,
tzinfo=pytz.utc)
end_dt = datetime(year=2008,
month=10,
day=16,
tzinfo=pytz.utc)
sim_params = SimulationParameters(
period_start=start_dt,
period_end=end_dt
)
perf_tracker = perf.PerformanceTracker(
sim_params
)
check_perf_tracker_serialization(perf_tracker)
class TestPosition(unittest.TestCase):
def setUp(self):
pass
def test_serialization(self):
dt = pd.Timestamp("1984/03/06 3:00PM")
pos = perf.Position(10, amount=np.float64(120.0), last_sale_date=dt,
last_sale_price=3.4)
p_string = pickle.dumps(pos)
test = pickle.loads(p_string)
nt.assert_dict_equal(test.__dict__, pos.__dict__)
class TestPositionTracker(unittest.TestCase):
def setUp(self):
pass
def test_empty_positions(self):
"""
make sure all the empty position stats return a numeric 0
Originally this bug was due to np.dot([], []) returning
np.bool_(False)
"""
pt = perf.PositionTracker()
stats = [
'calculate_positions_value',
'_net_exposure',
'_gross_value',
'_gross_exposure',
'_short_value',
'_short_exposure',
'_shorts_count',
'_long_value',
'_long_exposure',
'_longs_count',
]
for name in stats:
meth = getattr(pt, name)
val = meth()
self.assertEquals(val, 0)
self.assertNotIsInstance(val, (bool, np.bool_))
@with_environment()
def test_update_last_sale(self, env=None):
metadata = {1: {'asset_type': 'equity'},
2: {'asset_type': 'future',
'contract_multiplier': 1000}}
asset_finder = AssetFinder()
env.update_asset_finder(
asset_finder=asset_finder,
asset_metadata=metadata)
pt = perf.PositionTracker()
dt = pd.Timestamp("1984/03/06 3:00PM")
pos1 = perf.Position(1, amount=np.float64(100.0),
last_sale_date=dt, last_sale_price=10)
pos2 = perf.Position(2, amount=np.float64(100.0),
last_sale_date=dt, last_sale_price=10)
pt.update_positions({1: pos1, 2: pos2})
event1 = Event({'sid': 1,
'price': 11,
'dt': dt})
event2 = Event({'sid': 2,
'price': 11,
'dt': dt})
# Check cash-adjustment return value
self.assertEqual(0, pt.update_last_sale(event1))
self.assertEqual(100000, pt.update_last_sale(event2))
@with_environment()
def test_position_values_and_exposures(self, env=None):
metadata = {1: {'asset_type': 'equity'},
2: {'asset_type': 'equity'},
3: {'asset_type': 'future',
'contract_multiplier': 1000},
4: {'asset_type': 'future',
'contract_multiplier': 1000}}
env.update_asset_finder(asset_metadata=metadata)
pt = perf.PositionTracker()
dt = pd.Timestamp("1984/03/06 3:00PM")
pos1 = perf.Position(1, amount=np.float64(10.0),
last_sale_date=dt, last_sale_price=10)
pos2 = perf.Position(2, amount=np.float64(-20.0),
last_sale_date=dt, last_sale_price=10)
pos3 = perf.Position(3, amount=np.float64(30.0),
last_sale_date=dt, last_sale_price=10)
pos4 = perf.Position(4, amount=np.float64(-40.0),
last_sale_date=dt, last_sale_price=10)
pt.update_positions({1: pos1, 2: pos2, 3: pos3, 4: pos4})
# Test long-only methods
self.assertEqual(100, pt._long_value())
self.assertEqual(100 + 300000, pt._long_exposure())
# Test short-only methods
self.assertEqual(-200, pt._short_value())
self.assertEqual(-200 - 400000, pt._short_exposure())
# Test gross and net values
self.assertEqual(100 + 200, pt._gross_value())
self.assertEqual(100 - 200, pt._net_value())
# Test gross and net exposures
self.assertEqual(100 + 200 + 300000 + 400000, pt._gross_exposure())
self.assertEqual(100 - 200 + 300000 - 400000, pt._net_exposure())
@with_environment()
def test_serialization(self, env=None):
metadata = {1: {'asset_type': 'equity'},
2: {'asset_type': 'future',
'contract_multiplier': 1000}}
env.update_asset_finder(asset_metadata=metadata)
pt = perf.PositionTracker()
dt = pd.Timestamp("1984/03/06 3:00PM")
pos1 = perf.Position(1, amount=np.float64(120.0),
last_sale_date=dt, last_sale_price=3.4)
pos2 = perf.Position(2, amount=np.float64(100.0),
last_sale_date=dt, last_sale_price=3.4)
pt.update_positions({1: pos1, 2: pos2})
p_string = pickle.dumps(pt)
test = pickle.loads(p_string)
nt.assert_dict_equal(test._position_amounts, pt._position_amounts)
nt.assert_dict_equal(test._position_last_sale_prices,
pt._position_last_sale_prices)
nt.assert_count_equal(test.positions.keys(), pt.positions.keys())
for sid in pt.positions:
nt.assert_dict_equal(test.positions[sid].__dict__,
pt.positions[sid].__dict__)
class TestPerformancePeriod(unittest.TestCase):
def setUp(self):
pass
def test_serialization(self):
pt = perf.PositionTracker()
pp = perf.PerformancePeriod(100)
pp.position_tracker = pt
p_string = pickle.dumps(pp)
test = pickle.loads(p_string)
correct = pp.__dict__.copy()
del correct['_position_tracker']
nt.assert_count_equal(test.__dict__.keys(), correct.keys())
equal_keys = list(correct.keys())
equal_keys.remove('_account_store')
equal_keys.remove('_portfolio_store')
for k in equal_keys:
nt.assert_equal(test.__dict__[k], correct[k])
| apache-2.0 |
agrimaldi/metaseq | metaseq/results_table.py | 3 | 40294 | import copy
from textwrap import dedent
import numpy as np
import pandas
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.patches as patches
import matplotlib
import plotutils
from matplotlib.transforms import blended_transform_factory
from matplotlib.collections import EventCollection
import gffutils
import pybedtools
from pybedtools import featurefuncs
_base_doc = """%s
The underlying pandas.DataFrame is always available with the `data`
attribute.
Any attributes not explicitly in this class will be looked for in the
underlying pandas.DataFrame.
Parameters
----------
data : string or pandas.DataFrame
If string, assumes it's a filename and calls
pandas.read_table(data, **import_kwargs).
db : string or gffutils.FeatureDB
Optional database that can be used to generate features
import_kwargs : dict
These arguments will be passed to pandas.read_table() if `data` is
a filename.
"""
class ResultsTable(object):
__doc__ = _base_doc % dedent(
"""
Wrapper around a pandas.DataFrame that adds additional functionality.
""")
def __init__(self, data, db=None, import_kwargs=None):
if isinstance(data, basestring):
import_kwargs = import_kwargs or {}
data = pandas.read_table(data, **import_kwargs)
if not isinstance(data, pandas.DataFrame):
raise ValueError("`data` is not a pandas.DataFrame")
self.data = data
self._kwargs = dict(db=db, import_kwargs=import_kwargs)
self.attach_db(db)
self._cached_features = None
def __getattr__(self, attr):
if attr in self.__dict__:
return getattr(self, attr)
return getattr(self.data, attr)
def __getitem__(self, attr):
if isinstance(attr, basestring):
return self.data.__getitem__(attr)
else:
return self.__class__(self.data.__getitem__(attr), **self._kwargs)
def update(self, dataframe):
"""
Updates the current data with a new dataframe.
This extra step is required to get around the fancy pandas.DataFrame
indexing (like .ix, .iloc, etc).
"""
return self.__class__(dataframe, **self._kwargs)
def copy(self):
data = self.data.copy(deep=True)
return self.__class__(data, db=self.db, import_kwargs=self._kwargs)
def __repr__(self):
s = []
s.append("<%s instance, wrapping the following:"
% self.__class__.__name__)
s.append('')
s.extend('\t' + i for i in repr(self.data).splitlines(False))
s.append('>')
return '\n'.join(s)
def attach_db(self, db):
"""
Attach a gffutils.FeatureDB for access to features.
Useful if you want to attach a db after this instance has already been
created.
Parameters
----------
db : gffutils.FeatureDB
"""
if db is not None:
if isinstance(db, basestring):
db = gffutils.FeatureDB(db)
if not isinstance(db, gffutils.FeatureDB):
raise ValueError(
"`db` must be a filename or a gffutils.FeatureDB")
self._kwargs['db'] = db
self.db = db
def features(self, ignore_unknown=False):
"""
Generator of features.
If a gffutils.FeatureDB is attached, returns a pybedtools.Interval for
every feature in the dataframe's index.
Parameters
----------
ignore_unknown : bool
If True, silently ignores features that are not found in the db.
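        Examples
        --------
        Illustrative sketch, where `d` is a ResultsTable and the database
        filename is a placeholder:
        >>> d.attach_db('annotation.db')
        >>> features = pybedtools.BedTool(d.features(ignore_unknown=True))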
"""
if not self.db:
raise ValueError("Please attach a gffutils.FeatureDB")
for i in self.data.index:
try:
yield gffutils.helpers.asinterval(self.db[i])
except gffutils.FeatureNotFoundError:
if ignore_unknown:
continue
else:
raise gffutils.FeatureNotFoundError('%s not found' % i)
def reindex_to(self, x, attribute="Name"):
"""
Returns a copy that only has rows corresponding to feature names in x.
Parameters
----------
x : str or pybedtools.BedTool
BED, GFF, GTF, or VCF where the "Name" field (that is, the value
returned by feature['Name']) or any arbitrary attribute
attribute : str
Attribute containing the name of the feature to use as the index.
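        Examples
        --------
        Illustrative sketch, where `d` is a ResultsTable and the BED filename
        is a placeholder:
        >>> regions = pybedtools.BedTool('regions_of_interest.bed')
        >>> subset = d.reindex_to(regions, attribute='Name')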
"""
names = [i[attribute] for i in x]
new = self.copy()
new.data = new.data.reindex(names)
return new
def five_prime(self, upstream=1, downstream=0):
"""
Creates a BED/GFF file of the 5' end of each feature represented in the
table and returns the resulting pybedtools.BedTool object. Needs an
attached database.
Parameters
----------
upstream, downstream : int
Number of basepairs up and downstream to include
"""
return pybedtools.BedTool(self.features())\
.each(featurefuncs.five_prime, upstream, downstream)\
.saveas()
def three_prime(self, upstream=0, downstream=1):
"""
Creates a BED/GFF file of the 3' end of each feature represented in the
table and returns the resulting pybedtools.BedTool object. Needs an
attached database.
Parameters
----------
upstream, downstream : int
Number of basepairs up and downstream to include
"""
return pybedtools.BedTool(self.features())\
.each(featurefuncs.three_prime, upstream, downstream)\
.saveas()
TSS = five_prime
TTS = three_prime
def align_with(self, other):
"""
Align the dataframe's index with another.
"""
return self.__class__(self.data.reindex_like(other), **self._kwargs)
def genes_in_common(self, other):
"""
Convenience method for getting the genes found in both dataframes.
"""
return self.index & other.index
def __and__(self, other):
return self.index & other.index
def __or__(self, other):
return self.index | other.index
def __sub__(self, other):
return self.index - other.index
def __len__(self):
return len(self.data)
def scatter(self, x, y, xfunc=None, yfunc=None, xscale=None, yscale=None,
xlab=None, ylab=None, genes_to_highlight=None,
label_genes=False, marginal_histograms=False,
general_kwargs=dict(color="k", alpha=0.2, picker=True),
general_hist_kwargs=None, offset_kwargs={}, label_kwargs=None,
ax=None, one_to_one=None, callback=None, xlab_prefix=None,
ylab_prefix=None, sizefunc=None, hist_size=0.3, hist_pad=0.0,
nan_offset=0.015, pos_offset=0.99, linelength=0.01,
neg_offset=0.005, figure_kwargs=None):
"""
Do-it-all method for making annotated scatterplots.
Parameters
----------
x, y : array-like
Variables to plot. Must be names in self.data's DataFrame. For
example, "baseMeanA" and "baseMeanB"
xfunc, yfunc : callable
Functions to apply to `xvar` and `yvar` respectively. Default is
log2; set to None to have no transformation.
xlab, ylab : string
Labels for x and y axes; default is to use function names for
`xfunc` and `yfunc` and variable names `xvar` and `yvar`, e.g.,
"log2(baseMeanA)"
ax : None or Axes object
If `ax=None`, then makes a new fig and returns the Axes object,
otherwise, plots onto `ax`
general_kwargs : dict
Kwargs for matplotlib.scatter; specifies how all points look
genes_to_highlight : list of (index, dict) tuples
Provides lots of control to colors. It is a list of (`ind`,
`kwargs`) tuples, where each `ind` specifies genes to plot with
`kwargs`. Each dictionary updates a copy of `general_kwargs`. If
`genes_to_highlight` has a "name" kwarg, this must be a list that't
the same length as `ind`. It will be used to label the genes in
`ind` using `label_kwargs`.
callback : callable
Function to call upon clicking a point. Must accept a single
argument which is the gene ID. Default is to print the gene name,
but an example of another useful callback would be a mini-browser
connected to a genomic_signal object from which the expression data
were calculated.
one_to_one : None or dict
If not None, a dictionary of matplotlib.plot kwargs that will be
used to plot a 1:1 line.
label_kwargs : dict
            Kwargs for labeled genes (e.g., dict(style='italic')). Will only
            be used if an entry in `genes_to_highlight` has a `names` key.
offset_kwargs : dict
Kwargs to be passed to matplotlib.transforms.offset_copy, used for
adjusting the positioning of gene labels in relation to the actual
point.
xlab_prefix, ylab_prefix : str
Optional label prefix that will be added to the beginning of `xlab`
and/or `ylab`.
hist_size : float
Size of marginal histograms
hist_pad : float
Spacing between marginal histograms
nan_offset, pos_offset, neg_offset : float
Offset, in units of "fraction of axes" for the NaN, +inf, and -inf
"rug plots"
linelength : float
Line length for the rug plots
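        Examples
        --------
        A minimal, illustrative call, where `d` is a ResultsTable and the
        column names are placeholders:
        >>> up = d.data['log2FoldChange'] > 0
        >>> ax = d.scatter(
        ...     x='baseMeanA', y='baseMeanB', xfunc=np.log2, yfunc=np.log2,
        ...     genes_to_highlight=[(up, dict(color='r', alpha=0.5))],
        ...     one_to_one=dict(color='k', linestyle=':'))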
"""
_x = self.data[x]
_y = self.data[y]
# Construct defaults---------------------------------------------------
def identity(x):
return x.copy()
# Axis label setup
if xlab_prefix is None:
xlab_prefix = ""
if ylab_prefix is None:
ylab_prefix = ""
if xlab is None:
xlab = x
if xfunc is not None:
xlab = xlab_prefix + "%s(%s)" % (xfunc.__name__, str(x))
else:
xlab = xlab_prefix + "%s" % (str(x))
if ylab is None:
ylab = y
if yfunc is not None:
ylab = ylab_prefix + "%s(%s)" % (yfunc.__name__, str(y))
else:
ylab = ylab_prefix + "%s" % (str(y))
if xfunc is None:
xfunc = identity
if yfunc is None:
yfunc = identity
if general_kwargs is None:
general_kwargs = {}
if general_hist_kwargs is None:
general_hist_kwargs = {}
if genes_to_highlight is None:
genes_to_highlight = []
if ax is None:
if figure_kwargs is None:
figure_kwargs = {}
fig = plt.figure(**figure_kwargs)
ax = fig.add_subplot(111)
if label_kwargs is None:
label_kwargs = dict(
horizontalalignment='right',
verticalalignment='center',
style='italic',
bbox=dict(facecolor='w', edgecolor='None', alpha=0.5)
)
# Clean data ---------------------------------------------------------
xi = xfunc(_x)
yi = yfunc(_y)
# handle inf, -inf, and NaN
x_is_pos_inf = np.isinf(xi) & (xi > 0)
x_is_neg_inf = np.isinf(xi) & (xi < 0)
x_is_nan = np.isnan(xi)
y_is_pos_inf = np.isinf(yi) & (yi > 0)
y_is_neg_inf = np.isinf(yi) & (yi < 0)
y_is_nan = np.isnan(yi)
# Indexes for valid values
x_valid = ~(x_is_pos_inf | x_is_neg_inf | x_is_nan)
y_valid = ~(y_is_pos_inf | y_is_neg_inf | y_is_nan)
# global min/max
gmin = max(xi[x_valid].min(), yi[y_valid].min())
gmax = min(xi[x_valid].max(), yi[y_valid].max())
        # Convert any integer indexes into boolean, and compile a new list of
        # genes to highlight. This also handles optional hist kwargs.
_genes_to_highlight = []
for block in genes_to_highlight:
ind = block[0]
# Convert to boolean
if ind.dtype != 'bool':
new_ind = (np.zeros_like(xi) == 0)
new_ind[ind] = True
_genes_to_highlight.append(
tuple([new_ind] + list(block[1:]))
)
# If it's a DataFrame, we only want the boolean values;
else:
if hasattr(ind, 'values'):
ind = ind.values
_genes_to_highlight.append(
tuple([ind] + list(block[1:]))
)
        # Now remove from allind (which will be plotted using `general_kwargs`)
        # any genes that will also be plotted via genes_to_highlight. This
        # avoids double-plotting.
allind = np.zeros_like(xi) == 0
for block in _genes_to_highlight:
ind = block[0]
allind[ind] = False
# Copy over the color and alpha if they're not specified
general_hist_kwargs = plotutils._updatecopy(
orig=general_hist_kwargs, update_with=general_kwargs,
keys=['color', 'alpha'])
# Put the non-highlighted genes at the beginning of _genes_to_highlight
# list so we can just iterate over one list
_genes_to_highlight.insert(
0,
(allind, general_kwargs, general_hist_kwargs)
)
# Set up the object that will handle the marginal histograms
self.marginal = plotutils.MarginalHistScatter(
ax, hist_size=hist_size, pad=hist_pad)
# Set up kwargs for x and y rug plots
rug_x_kwargs = dict(
linelength=linelength,
transform=blended_transform_factory(ax.transData, ax.transAxes),
)
rug_y_kwargs = dict(
linelength=linelength,
transform=blended_transform_factory(ax.transAxes, ax.transData),
orientation='vertical',
)
# EventCollection objects need a color as a 3-tuple, so set up
# a converter here.
color_converter = matplotlib.colors.ColorConverter().to_rgb
# Plot the one-to-one line, if kwargs were specified
if one_to_one:
ax.plot([gmin, gmax],
[gmin, gmax],
**one_to_one)
# Plot 'em all, and label if specified
# In order to avoid calling the callback function multiple times when
# we have overlapping genes to highlight (e.g., a gene that is both
# upregulated AND has a peak), keep track of everything that's been
# added so far.
self._seen = np.ones_like(xi) == 0
for block in _genes_to_highlight:
ind = block[0]
kwargs = block[1]
if len(block) == 3:
hist_kwargs = block[2]
else:
hist_kwargs = {}
names = kwargs.pop('names', None)
_marginal_histograms = (
kwargs.pop('marginal_histograms', False) or
marginal_histograms)
updated_kwargs = plotutils._updatecopy(
orig=kwargs, update_with=general_kwargs)
updated_hist_kwargs = plotutils._updatecopy(
orig=hist_kwargs, update_with=general_hist_kwargs)
updated_hist_kwargs = plotutils._updatecopy(
orig=updated_hist_kwargs, update_with=kwargs,
keys=['color', 'alpha'], override=True)
xhist_kwargs = updated_kwargs.pop('xhist_kwargs', None)
yhist_kwargs = updated_kwargs.pop('yhist_kwargs', None)
self.marginal.append(
xi[ind & x_valid & y_valid],
yi[ind & x_valid & y_valid],
scatter_kwargs=dict(**updated_kwargs),
hist_kwargs=updated_hist_kwargs,
xhist_kwargs=xhist_kwargs,
yhist_kwargs=yhist_kwargs,
marginal_histograms=_marginal_histograms,
)
# This is important for callbacks: here we grab the last-created
# collection,
coll = self.marginal.scatter_ax.collections[-1]
coll.df = self.data
coll.ind = ind & x_valid & y_valid
color = color_converter(updated_kwargs['color'])
rug_x_kwargs['color'] = color
rug_y_kwargs['color'] = color
# Note: if both x and y are not valid, then they will not be on the
# plot.
items = [
# top rug, y is +inf and x is valid
(xi, ind & x_valid & y_is_pos_inf, pos_offset, rug_x_kwargs),
# one of the bottom rugs, where y is NaN
(xi, ind & x_valid & y_is_nan, nan_offset, rug_x_kwargs),
# bottom rug, y is -inf
(xi, ind & x_valid & y_is_neg_inf, neg_offset, rug_x_kwargs),
# right rug, x is +inf
(yi, ind & y_valid & x_is_pos_inf, pos_offset, rug_y_kwargs),
# one of the left rugs; x is NaN
(yi, ind & y_valid & x_is_nan, nan_offset, rug_y_kwargs),
# left rug, x is -inf
(yi, ind & y_valid & x_is_neg_inf, neg_offset, rug_y_kwargs),
]
for values, index, offset, kwargs in items:
coll = EventCollection(
values[index], lineoffset=offset, **kwargs)
coll.df = self.data
coll.ind = index
ax.add_collection(coll)
if names:
transOffset = matplotlib.transforms.offset_copy(
ax.transData, fig=ax.figure, **offset_kwargs)
for xii, yii, name in zip(xi[ind], yi[ind], names):
ax.text(xii,
yii,
name,
transform=transOffset,
**label_kwargs)
# register callback
if callback is None:
callback = self._default_callback
def wrapped_callback(event):
for _id in self._id_callback(event):
callback(_id)
ax.figure.canvas.mpl_connect('pick_event', wrapped_callback)
ax.set_xlabel(xlab)
ax.set_ylabel(ylab)
ax.axis('tight')
return ax
def radviz(self, column_names, transforms=dict(), **kwargs):
"""
Radviz plot.
Useful for exploratory visualization, a radviz plot can show
multivariate data in 2D. Conceptually, the variables (here, specified
in `column_names`) are distributed evenly around the unit circle. Then
each point (here, each row in the dataframe) is attached to each
variable by a spring, where the stiffness of the spring is proportional
        to the value of the corresponding variable. The final position of a point
represents the equilibrium position with all springs pulling on it.
In practice, each variable is normalized to 0-1 (by subtracting the
        minimum and dividing by the range).
This is a very exploratory plot. The order of `column_names` will
affect the results, so it's best to try a couple different orderings.
For other caveats, see [1].
Additional kwargs are passed to self.scatter, so subsetting, callbacks,
and other configuration can be performed using options for that method
(e.g., `genes_to_highlight` is particularly useful).
Parameters
----------
column_names : list
Which columns of the dataframe to consider. The columns provided
should only include numeric data, and they should not contain any
NaN, inf, or -inf values.
transforms : dict
Dictionary mapping column names to transformations that will be
applied just for the radviz plot. For example, np.log1p is
a useful function. If a column name is not in this dictionary, it
will be used as-is.
ax : matplotlib.Axes
If not None, then plot the radviz on this axes. If None, then
a new figure will be created.
kwargs : dict
Additional arguments are passed to self.scatter. Note that not all
possible kwargs for self.scatter are necessarily useful for
            a radviz plot (for example, marginal histograms would not be
meaningful).
Notes
-----
This method adds two new variables to self.data: "radviz_x" and
"radviz_y". It then calls the self.scatter method, using these new
variables.
The data transformation was adapted from the
pandas.tools.plotting.radviz function.
References
----------
[1] Hoffman,P.E. et al. (1997) DNA visual and analytic data mining. In
the Proceedings of the IEEE Visualization. Phoenix, AZ, pp.
437-441.
[2] http://www.agocg.ac.uk/reports/visual/casestud/brunsdon/radviz.htm
[3] http://pandas.pydata.org/pandas-docs/stable/visualization.html\
#radviz
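        Examples
        --------
        Illustrative only; the column names below are placeholders for
        numeric columns in `self.data`:
        >>> ax = d.radviz(
        ...     ['baseMeanA', 'baseMeanB', 'log2FoldChange'],
        ...     transforms={'baseMeanA': np.log1p, 'baseMeanB': np.log1p})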
"""
# make a copy of data
x = self.data[column_names].copy()
for k, v in transforms.items():
x[k] = v(x[k])
def normalize(series):
mn = min(series)
mx = max(series)
return (series - mn) / (mx - mn)
df = x.apply(normalize)
to_plot = []
n = len(column_names)
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(n))
for i in range(n)]])
for i in range(len(x)):
row = df.irow(i).values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
to_plot.append((s * row_).sum(axis=0) / row.sum())
x_, y_ = zip(*to_plot)
self.data['radviz_x'] = x_
self.data['radviz_y'] = y_
ax = self.scatter('radviz_x', 'radviz_y', **kwargs)
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, column_names):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax
def _id_callback(self, event):
# event.ind is the index into event's x and y data.
#
# event.artist.ind is the index of the entire artist into the original
# dataframe.
subset_df = event.artist.df.ix[event.artist.ind]
for i in event.ind:
_id = subset_df.index[i]
yield _id
def _default_callback(self, i):
print self.data.ix[i]
def strip_unknown_features(self):
"""
Remove features not found in the `gffutils.FeatureDB`. This will
typically include 'ambiguous', 'no_feature', etc, but can also be
useful if the database was created from a different one than was used
to create the table.
"""
if not self.db:
return self
ind = []
for i, gene_id in enumerate(self.data.index):
try:
self.db[gene_id]
ind.append(i)
except gffutils.FeatureNotFoundError:
pass
ind = np.array(ind)
return self.__class__(self.data.ix[ind], **self._kwargs)
def genes_with_peak(self, peaks, transform_func=None, split=False,
intersect_kwargs=None, id_attribute='ID', *args,
**kwargs):
"""
Returns a boolean index of genes that have a peak nearby.
Parameters
----------
peaks : string or pybedtools.BedTool
If string, then assume it's a filename to a BED/GFF/GTF file of
intervals; otherwise use the pybedtools.BedTool object directly.
transform_func : callable
This function will be applied to each gene object returned by
self.features(). Additional args and kwargs are passed to
`transform_func`. For example, if you're looking for peaks within
1kb upstream of TSSs, then pybedtools.featurefuncs.TSS would be
a useful `transform_func`, and you could supply additional kwargs
of `upstream=1000` and `downstream=0`.
This function can return iterables of features, too. For example,
you might want to look for peaks falling within the exons of
a gene. In this case, `transform_func` should return an iterable
of pybedtools.Interval objects. The only requirement is that the
`name` field of any feature matches the index of the dataframe.
intersect_kwargs : dict
kwargs passed to pybedtools.BedTool.intersect.
id_attribute : str
The attribute in the GTF or GFF file that contains the id of the
            gene. For meaningful results to be returned, a gene's ID must also
found in the index of the dataframe.
For GFF files, typically you'd use `id_attribute="ID"`. For GTF
files, you'd typically use `id_attribute="gene_id"`.
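        Examples
        --------
        A sketch of the "peaks within 1 kb upstream of the TSS" case described
        above, where `d` is a ResultsTable and the peaks filename is
        a placeholder:
        >>> from pybedtools.featurefuncs import TSS
        >>> has_peak = d.genes_with_peak(
        ...     'peaks.bed', transform_func=TSS, upstream=1000, downstream=0)
        >>> with_peak = d[has_peak]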
"""
def _transform_func(x):
"""
In order to support transform funcs that return a single feature or
an iterable of features, we need to wrap it
"""
result = transform_func(x)
if isinstance(result, pybedtools.Interval):
result = [result]
for i in result:
if i:
                    yield i
intersect_kwargs = intersect_kwargs or {}
if not self._cached_features:
self._cached_features = pybedtools\
.BedTool(self.features())\
.saveas()
if transform_func:
if split:
features = self._cached_features\
.split(_transform_func, *args, **kwargs)
else:
features = self._cached_features\
.each(transform_func, *args, **kwargs)
else:
features = self._cached_features
hits = list(set([i[id_attribute] for i in features.intersect(
peaks, **intersect_kwargs)]))
return self.data.index.isin(hits)
class DifferentialExpressionResults(ResultsTable):
__doc__ = _base_doc % dedent("""
A ResultsTable subclass for working with differential expression results.
Adds methods for up/down regulation, ma_plot, and sets class variables for
which columns should be considered for pval, log fold change, and mean
values. This class acts as a parent for subclasses like DESeqResults,
    EdgeRResults, and others.
""")
pval_column = 'padj'
lfc_column = 'log2FoldChange'
mean_column = 'baseMean'
def __init__(self, data, db=None, header_check=True, **kwargs):
import_kwargs = kwargs.pop('import_kwargs', {})
if header_check and isinstance(data, basestring):
comment_char = import_kwargs.get('comment', '#')
for i, line in enumerate(open(data)):
if line[0] != comment_char:
break
import_kwargs['skiprows'] = i
import_kwargs['na_values'] = ['nan']
import_kwargs['index_col'] = import_kwargs.pop('index_col', 0)
super(DifferentialExpressionResults, self).__init__(
data=data, db=db, import_kwargs=import_kwargs, **kwargs)
def changed(self, thresh=0.05, idx=True):
"""
Changed features.
{threshdoc}
"""
ind = self.data[self.pval_column] <= thresh
if idx:
return ind
return self[ind]
def unchanged(self, thresh=0.05, idx=True):
"""
        Unchanged features.
{threshdoc}
"""
ind = (
(self.data[self.pval_column] > thresh)
| np.isnan(self.data[self.pval_column])
)
if idx:
return ind
return self[ind]
def enriched(self, thresh=0.05, idx=True):
"""
Enriched features.
{threshdoc}
"""
return self.upregulated(thresh=thresh, idx=idx)
def upregulated(self, thresh=0.05, idx=True):
"""
Upregulated features.
{threshdoc}
"""
ind = (
(self.data[self.pval_column] <= thresh)
& (self.data[self.lfc_column] > 0)
)
if idx:
return ind
return self[ind]
def downregulated(self, thresh=0.05, idx=True):
"""
Downregulated features.
{threshdoc}
"""
ind = (
(self.data[self.pval_column] <= thresh)
& (self.data[self.lfc_column] < 0)
)
if idx:
return ind
return self[ind]
def disenriched(self, thresh=0.05, idx=True):
"""
Disenriched features.
{threshdoc}
"""
return self.downregulated(thresh=thresh, idx=idx)
def ma_plot(self, thresh, up_kwargs=None, dn_kwargs=None,
zero_line=None, **kwargs):
"""
MA plot
Plots the average read count across treatments (x-axis) vs the log2
fold change (y-axis).
Additional kwargs are passed to self.scatter (useful ones might include
`genes_to_highlight`)
Parameters
----------
thresh : float
Features with values <= `thresh` will be highlighted in the plot.
up_kwargs, dn_kwargs : None or dict
Kwargs passed to matplotlib's scatter(), used for styling up/down
regulated features (defined by `thresh` and `col`)
zero_line : None or dict
Kwargs passed to matplotlib.axhline(0).
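        Examples
        --------
        A sketch, where `d` is a loaded DifferentialExpressionResults object:
        >>> ax = d.ma_plot(
        ...     thresh=0.05,
        ...     up_kwargs=dict(color='r', s=10),
        ...     dn_kwargs=dict(color='b', s=10),
        ...     zero_line=dict(color='y', linewidth=2))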
"""
genes_to_highlight = kwargs.pop('genes_to_highlight', [])
genes_to_highlight.append(
(self.upregulated(thresh),
up_kwargs or dict(color='r')))
genes_to_highlight.append(
(self.downregulated(thresh),
dn_kwargs or dict(color='b')))
if zero_line is None:
zero_line = {}
x = self.mean_column
y = self.lfc_column
if 'xfunc' not in kwargs:
kwargs['xfunc'] = np.log
ax = self.scatter(
x=x,
y=y,
genes_to_highlight=genes_to_highlight,
**kwargs)
if zero_line:
ax.axhline(0, **zero_line)
return ax
threshdoc = """
Parameters
----------
thresh : float
        Features are selected by comparing the `pval_column` value to `thresh`
idx : bool
If True, a boolean index will be returned. If False, a new object will
be returned that has been subsetted.
"""
enriched.__doc__ = enriched.__doc__.format(threshdoc=threshdoc)
disenriched.__doc__ = disenriched.__doc__.format(threshdoc=threshdoc)
upregulated.__doc__ = upregulated.__doc__.format(threshdoc=threshdoc)
downregulated.__doc__ = downregulated.__doc__.format(threshdoc=threshdoc)
class EdgeRResults(DifferentialExpressionResults):
__doc__ = _base_doc % dedent(
"""
Class for working with results from edgeR.
Just like a DifferentialExpressionResults object, but sets the
pval_column, lfc_column, and mean_column to the names used in edgeR's
output.
""")
pval_column = 'FDR'
lfc_column = 'logFC'
mean_column = 'logCPM'
class DESeqResults(DifferentialExpressionResults):
__doc__ = _base_doc % dedent(
"""
Class for working with results from DESeq.
Just like a DifferentialExpressionResults object, but sets the
    pval_column, lfc_column, and mean_column to the names used in DESeq's
output.
""")
def colormapped_bedfile(self, genome, cmap=None):
"""
Create a BED file with padj encoded as color
Features will be colored according to adjusted pval (phred
transformed). Downregulated features have the sign flipped.
Parameters
----------
        genome : str or dict
            Genome assembly name (or chromsizes dict) used to truncate
            features to chromosome boundaries.
        cmap : matplotlib colormap
            Default is matplotlib.cm.RdBu_r
Notes
-----
Requires a FeatureDB to be attached.
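        Examples
        --------
        Illustrative sketch; the genome assembly name is a placeholder:
        >>> bed = d.colormapped_bedfile(genome='hg19')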
"""
if self.db is None:
raise ValueError("FeatureDB required")
db = gffutils.FeatureDB(self.db)
def scored_feature_generator(d):
for i in range(len(d)):
try:
feature = db[d.ix[i]]
except gffutils.FeatureNotFoundError:
raise gffutils.FeatureNotFoundError(d.ix[i])
score = -10 * np.log10(d.padj[i])
lfc = d.log2FoldChange[i]
if np.isnan(lfc):
score = 0
if lfc < 0:
score *= -1
feature.score = str(score)
feature = extend_fields(
gff2bed(gffutils.helpers.asinterval(feature)), 9)
fields = feature.fields[:]
fields[6] = fields[1]
fields[7] = fields[2]
fields.append(str(d.padj[i]))
fields.append(str(d.pval[i]))
fields.append('%.3f' % d.log2FoldChange[i])
                fields.append('%.3f' % d.baseMeanA[i])
fields.append('%.3f' % d.baseMeanB[i])
yield pybedtools.create_interval_from_list(fields)
x = pybedtools.BedTool(scored_feature_generator(self)).saveas()
norm = x.colormap_normalize()
if cmap is None:
cmap = cm.RdBu_r
cmap = colormap_adjust.cmap_center_point_adjust(
cmap, [norm.vmin, norm.vmax], 0)
def score_zeroer(f):
f.score = '0'
return f
return x.each(add_color, cmap=cmap, norm=norm)\
.sort()\
.each(score_zeroer)\
.truncate_to_chrom(genome)\
.saveas()
def autosql_file(self):
"""
Generate the autosql for DESeq results (to create bigBed)
Returns a temp filename containing the autosql defining the extra
fields.
This for creating bigBed files from BED files created by
colormapped_bed. When a user clicks on a feature, the DESeq results
will be reported.
"""
fn = pybedtools.BedTool._tmp()
AUTOSQL = dedent(
"""
table example
"output from DESeq"
(
string chrom; "chromosome"
uint chromStart; "start coord"
uint chromEnd; "stop coord"
string name; "name of feature"
uint score; "always zero"
char[1] strand; "+ or - for strand"
uint thickStart; "Coding region start"
uint thickEnd; "Coding region end"
uint reserved; "color according to score"
string padj; "DESeq adjusted p value"
string pval; "DESeq raw p value"
string logfoldchange; "DESeq log2 fold change"
string basemeana; "DESeq baseMeanA"
string basemeanb; "DESeq baseMeanB"
)
""")
fout = open(fn, 'w')
fout.write(AUTOSQL)
fout.close()
return fn
class DESeq2Results(DESeqResults):
__doc__ = _base_doc % dedent(
"""
Class for working with results from DESeq2.
Just like a DifferentialExpressionResults object, but sets the
    pval_column, lfc_column, and mean_column to the names used in DESeq2's
output.
""")
pval_column = 'padj'
lfc_column = 'log2FoldChange'
mean_column = 'baseMean'
class LazyDict(object):
def __init__(self, fn_dict, dbfn, index_file, extra=None, cls=DESeqResults,
modifier=None):
"""
Dictionary-like object that lazily-loads ResultsTable objects.
Parameters
----------
fn_dict : dict
Keys of `fn_dict` will be the keys of this LazyDict object. Values
should be filenames which will be loaded into ResultsTable object
upon access for the first time.
index_file : str
Path to a file that contains one ID per line. This file is used to
ensure all ResultsTable objects are aligned to the same index.
dbfn : str
Filename to a gffutils database. This enables gene info to be
attached to the dataframe.
extra : pandas.dataframe
            This dataframe will be merged into the data in each file. This
is useful for attaching things like gene lengths, alt names, etc.
In order for it to work, this dataframe must be indexed the same
way the ResultsTable files are indexed.
cls : ResultsTable class or subclass
Each filename in `fn_dict` will be converted using this class.
modifier : callable
Upon first access, each newly-constructed ResultsTable will first
have the `extra` data attached, and then will be provided as this
callable's only argument. The callable can make any modifications
to the ResultsTable, and return a new version that will be used in
the future when the same key is accessed. For example, exonic bp
data can be provided as part of the `extra` dataframe, and then the
`modifier` can be a function that adds an RPKM column.
Notes
-----
When a key is provided for the first time, the workflow is
ResultsTable(fn, **kwargs) -> attach `extra` -> send to `modifier` ->
return extended and modified ResultsTable. Subsequent access of the
same key will immediately return the extended-and-modified
ResultsTable.
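        Examples
        --------
        A sketch with placeholder filenames:
        >>> lazy = LazyDict(
        ...     fn_dict={'ctrl': 'ctrl.txt', 'knockdown': 'kd.txt'},
        ...     dbfn='annotation.db',
        ...     index_file='all_gene_ids.txt',
        ...     cls=DESeq2Results)
        >>> d = lazy['ctrl']  # loaded, aligned to the index, and cached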
"""
self.fn_dict = fn_dict
self._dict = {}
self.dbfn = dbfn
self.index = [i.strip() for i in open(index_file)]
if extra is not None:
self.extra = extra.ix[self.index]
else:
self.extra = extra
self.modifier = modifier
self._cls = cls
def __getitem__(self, key):
if key not in self._dict:
fn = self.fn_dict[key]
obj = self._cls(fn, db=self.dbfn)
obj.data = obj.data.ix[self.index]
if self.extra is not None:
obj.data = pandas.merge(obj.data, self.extra, left_index=True,
right_index=True)
if self.modifier:
obj = self.modifier(obj)
self._dict[key] = obj
return self._dict[key]
def __repr__(self):
s = "<%s> with possible keys\n:%s\n" \
% (self.__class__.__name__, self.fn_dict.keys())
s += "and existing keys:\n"
s += repr(self._dict)
return s
def keys(self):
return self.fn_dict.keys()
def values(self):
return [self._dict[key] for key in self.keys()]
def items(self):
return list((key, self._dict[key]) for key in self.keys())
| mit |
Srisai85/scipy | scipy/signal/waveforms.py | 64 | 14818 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly']
def sawtooth(t, width=1):
"""
Return a periodic sawtooth or triangle waveform.
The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
Time.
width : array_like, optional
Width of the rising ramp as a proportion of the total cycle.
Default is 1, producing a rising ramp, while 0 produces a falling
ramp. `width` = 0.5 produces a triangle wave.
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500)
>>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
"""
t, w = asarray(t), asarray(width)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# take t modulo 2*pi
tmod = mod(t, 2 * pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
tsub = extract(mask2, tmod)
wsub = extract(mask2, w)
place(y, mask2, tsub / (pi * wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
tsub = extract(mask3, tmod)
wsub = extract(mask3, w)
place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period ``2*pi``, has value +1 from 0 to
``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
the interval [0,1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
The input time array.
duty : array_like, optional
Duty cycle. Default is 0.5 (50% duty cycle).
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the square waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500, endpoint=False)
>>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
>>> plt.ylim(-2, 2)
A pulse-width modulated sine wave:
>>> plt.figure()
>>> sig = np.sin(2 * np.pi * t)
>>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
>>> plt.subplot(2, 1, 1)
>>> plt.plot(t, sig)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(t, pwm)
>>> plt.ylim(-1.5, 1.5)
"""
t, w = asarray(t), asarray(duty)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# on the interval 0 to duty*2*pi function is 1
tmod = mod(t, 2 * pi)
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
place(y, mask2, 1)
# on the interval duty*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
place(y, mask3, -1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
retenv=False):
"""
Return a Gaussian modulated sinusoid:
``exp(-a t^2) exp(1j*2*pi*fc*t).``
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (e.g. Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (e.g. Hz).
Default is 0.5.
bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
Returns
-------
yI : ndarray
Real part of signal. Always returned.
yQ : ndarray
Imaginary part of signal. Only returned if `retquad` is True.
yenv : ndarray
Envelope of signal. Only returned if `retenv` is True.
See Also
--------
scipy.signal.morlet
Examples
--------
Plot real component, imaginary component, and envelope for a 5 Hz pulse,
sampled at 100 Hz for 2 seconds:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
>>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
>>> plt.plot(t, i, t, q, t, e, '--')
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
    # pi^2/a * fc^2 * bw^2 / 4 = -log(ref)
a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref) / a)
yenv = exp(-a * t * t)
yI = yenv * cos(2 * pi * fc * t)
yQ = yenv * sin(2 * pi * fc * t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per unit';
there is no requirement here that the unit is one second. The
important distinction is that the units of rotation are cycles, not
radians. Likewise, `t` could be a measurement of space instead of time.
Parameters
----------
t : array_like
Times at which to evaluate the waveform.
f0 : float
Frequency (e.g. Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (e.g. Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
y : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
(from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
See Also
--------
sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f0 and f1 must be nonzero.
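    Examples
    --------
    The following sketch generates and plots a linear chirp that sweeps from
    6 Hz down to 1 Hz over 10 seconds:
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(0, 10, 5001)
    >>> w = signal.chirp(t, f0=6, f1=1, t1=10, method='linear')
    >>> plt.plot(t, w)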
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
def sweep_poly(t, poly, phi=0):
"""
Frequency-swept cosine generator, with a time-dependent frequency.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1-D array_like or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
        Phase offset, in degrees. Default: 0.
Returns
-------
sweep_poly : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
(from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.
See Also
--------
chirp
Notes
-----
.. versionadded:: 0.8.0
If `poly` is a list or ndarray of length `n`, then the elements of
`poly` are the coefficients of the polynomial, and the instantaneous
frequency is:
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of `numpy.poly1d`, then the instantaneous
frequency is:
``f(t) = poly(t)``
Finally, the output `s` is:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,
``f(t)`` as defined above.
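    Examples
    --------
    For instance, a sweep whose instantaneous frequency follows the cubic
    ``f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2`` over 10 seconds:
    >>> from scipy import signal
    >>> p = np.poly1d([0.025, -0.36, 1.25, 2.0])
    >>> t = np.linspace(0, 10, 5001)
    >>> w = signal.sweep_poly(t, p)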
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
| bsd-3-clause |
peterfpeterson/mantid | scripts/test/MultiPlotting/Subplot_test.py | 3 | 10065 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from matplotlib.gridspec import GridSpec
from unittest import mock
from mantidqt.utils.qt.testing import start_qapplication
from MultiPlotting.multi_plotting_context import PlottingContext
from MultiPlotting.subplot.subplot import subplot
def rm_logic(name):
if name == "two":
return False
return True
@start_qapplication
class SubplotTest(unittest.TestCase):
def setUp(self):
context = PlottingContext()
self.subplot = subplot(context)
self.subplot.canvas.draw = mock.MagicMock()
def setup_rm(self):
self.subplot._raise_rm_window = mock.Mock()
self.subplot._raise_selector_window = mock.Mock()
self.subplot._get_rm_window = mock.Mock()
self.subplot._create_select_window = mock.MagicMock()
def test_rm_one_plot_new_window(self):
self.subplot._rm_window = None
self.subplot._selector_window = None
self.subplot._context.subplots["one"] = mock.Mock()
self.setup_rm()
self.subplot._rm()
self.assertEqual(self.subplot._raise_rm_window.call_count, 0)
self.assertEqual(self.subplot._raise_selector_window.call_count, 0)
self.assertEqual(self.subplot._get_rm_window.call_count, 1)
self.assertEqual(self.subplot._create_select_window.call_count, 0)
def test_rm_one_plot_old_window(self):
self.subplot._rm_window = mock.Mock()
self.subplot._selector_window = None
self.subplot._context.subplots["one"] = mock.Mock()
self.setup_rm()
self.subplot._rm()
self.assertEqual(self.subplot._raise_rm_window.call_count, 0)
self.assertEqual(self.subplot._raise_selector_window.call_count, 0)
self.assertEqual(self.subplot._get_rm_window.call_count, 0)
self.assertEqual(self.subplot._create_select_window.call_count, 0)
def test_rm_two_plots_new_window(self):
self.subplot._rm_window = None
self.subplot._selector_window = None
self.subplot._context.subplots["one"] = mock.Mock()
self.subplot._context.subplots["two"] = mock.Mock()
self.setup_rm()
self.subplot._rm()
self.assertEqual(self.subplot._raise_rm_window.call_count, 0)
self.assertEqual(self.subplot._raise_selector_window.call_count, 0)
self.assertEqual(self.subplot._get_rm_window.call_count, 0)
self.assertEqual(self.subplot._create_select_window.call_count, 1)
def test_rm_two_plots_old_select_window(self):
self.subplot._rm_window = None
self.subplot._selector_window = mock.Mock()
self.subplot._context.subplots["one"] = mock.Mock()
self.subplot._context.subplots["two"] = mock.Mock()
self.setup_rm()
self.subplot._rm()
self.assertEqual(self.subplot._raise_rm_window.call_count, 0)
self.assertEqual(self.subplot._raise_selector_window.call_count, 0)
self.assertEqual(self.subplot._get_rm_window.call_count, 0)
self.assertEqual(self.subplot._create_select_window.call_count, 1)
def test_rm_two_plots_old_rm_window(self):
self.subplot._rm_window = mock.Mock()
self.subplot._selector_window = None
self.subplot._context.subplots["one"] = mock.Mock()
self.subplot._context.subplots["two"] = mock.Mock()
self.setup_rm()
self.subplot._rm()
self.assertEqual(self.subplot._raise_rm_window.call_count, 0)
self.assertEqual(self.subplot._raise_selector_window.call_count, 0)
self.assertEqual(self.subplot._get_rm_window.call_count, 0)
self.assertEqual(self.subplot._create_select_window.call_count, 1)
def setup_apply_rm(self):
self.subplot._rm_window = mock.Mock()
self.subplot._rm_window.subplot = "test"
self.subplot._context.subplots["test"] = mock.MagicMock()
self.subplot._remove_subplot = mock.Mock()
self.subplot._close_rm_window = mock.Mock()
def test_apply_rmAll(self):
names = ["one", "two", "three"]
self.setup_apply_rm()
self.subplot._rm_window.getState = mock.Mock(return_value=True)
self.subplot._apply_rm(names)
self.assertEqual(self.subplot._context.subplots["test"].removeLine.call_count, 3)
self.assertEqual(self.subplot._close_rm_window.call_count, 1)
def test_apply_rmNone(self):
names = ["one", "two", "three"]
self.setup_apply_rm()
self.subplot._rm_window.getState = mock.Mock(return_value=False)
self.subplot._apply_rm(names)
self.assertEqual(self.subplot._context.subplots["test"].removeLine.call_count, 0)
self.assertEqual(self.subplot._close_rm_window.call_count, 1)
def test_apply_rmSome(self):
names = ["one", "two", "three"]
self.setup_apply_rm()
self.subplot._rm_window.getState = mock.Mock(side_effect=rm_logic)
self.subplot._apply_rm(names)
self.assertEqual(self.subplot._context.subplots["test"].removeLine.call_count, 2)
self.assertEqual(self.subplot._close_rm_window.call_count, 1)
def test_addSubplot(self):
self.subplot._update = mock.Mock()
gridspec = GridSpec(2, 2)
self.subplot._context.update_gridspec = mock.Mock()
self.subplot._context._gridspec = gridspec
self.subplot.add_subplot("test", 3)
self.subplot._context.update_gridspec.assert_called_with(4)
self.assertEqual(self.subplot._update.call_count, 1)
def test_replaced_ws_false(self):
one = mock.Mock()
two = mock.Mock()
self.subplot._context.subplots["one"] = one
self.subplot._context.subplots["two"] = two
self.subplot.canvas.draw = mock.Mock()
ws = mock.Mock()
self.subplot._context.subplots["one"].replace_ws = mock.Mock(return_value=False)
self.subplot._context.subplots["two"].replace_ws = mock.Mock(return_value=False)
self.subplot._replaced_ws(ws)
self.assertEqual(self.subplot.canvas.draw.call_count, 0)
def test_replaced_ws(self):
one = mock.Mock()
two = mock.Mock()
self.subplot._context.subplots["one"] = one
self.subplot._context.subplots["two"] = two
self.subplot.canvas.draw = mock.Mock()
ws = mock.Mock()
self.subplot._context.subplots["one"].replace_ws = mock.Mock(return_value=False)
self.subplot._context.subplots["two"].replace_ws = mock.Mock(return_value=True)
self.subplot._replaced_ws(ws)
self.assertEqual(self.subplot.canvas.draw.call_count, 1)
def test_replaced_ws_true(self):
one = mock.Mock()
two = mock.Mock()
self.subplot._context.subplots["one"] = one
self.subplot._context.subplots["two"] = two
self.subplot.canvas.draw = mock.Mock()
ws = mock.Mock()
self.subplot._context.subplots["one"].replace_ws = mock.Mock(return_value=True)
self.subplot._context.subplots["two"].replace_ws = mock.Mock(return_value=True)
self.subplot._replaced_ws(ws)
self.assertEqual(self.subplot.canvas.draw.call_count, 2)
def test_that_connect_rm_signal_calls_the_correct_function(self):
self.subplot.signal_rm_line = mock.Mock()
self.subplot.connect_rm_line_signal("slot value")
self.subplot.signal_rm_line.connect.assert_called_with("slot value")
def test_that_disconnect_rm_signal_calls_the_correct_function(self):
self.subplot.signal_rm_line = mock.Mock()
self.subplot.disconnect_rm_line_signal()
self.assertEqual(1, self.subplot.signal_rm_line.disconnect.call_count)
@staticmethod
def rm_window_side_effect(name):
return True
def test_that_remove_line_calls_removeLine_the_correct_number_of_times(self):
subplot_name = "subplot"
lines = ["one", "two", "three"]
calls = [mock.call("one"), mock.call("two"), mock.call("three")]
self.subplot._context.subplots = {subplot_name: mock.Mock()}
self.subplot.remove_lines(subplot_name, lines)
self.subplot._context.subplots[subplot_name].removeLine.assert_has_calls(calls)
def test_that_remove_lines_removes_subplot_is_no_line_is_present(self):
subplot_name = "subplot"
self.subplot._context.get_lines = mock.Mock(return_value=[])
self.subplot._context.subplots = {subplot_name: mock.Mock()}
self.subplot._remove_subplot = mock.Mock()
self.subplot.canvas = mock.Mock()
self.subplot.remove_lines(subplot_name, ["one"])
self.assertEqual(1, self.subplot._remove_subplot.call_count)
self.assertEqual(0, self.subplot.canvas.draw.call_count)
def test_that_remove_lines_updates_canvas_without_closing_plot_if_lines_are_present(self):
subplot_name = "subplot"
self.subplot._context.get_lines = mock.Mock(return_value=["not empty"])
self.subplot._context.subplots = {subplot_name: mock.Mock()}
self.subplot._remove_subplot = mock.Mock()
self.subplot.canvas = mock.Mock()
self.subplot.remove_lines(subplot_name, ["one"])
self.assertEqual(0, self.subplot._remove_subplot.call_count)
self.assertEqual(1, self.subplot.canvas.draw.call_count)
def test_that_remove_lines_emits_signal(self):
subplot_name = "subplot"
self.subplot._context.get_lines = mock.Mock(return_value=["not empty"])
self.subplot._context.subplots = {subplot_name: mock.Mock()}
self.subplot.signal_rm_line = mock.Mock()
self.subplot.canvas = mock.Mock()
self.subplot.remove_lines(subplot_name, ["one"])
self.assertEqual(1, self.subplot.signal_rm_line.emit.call_count)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
jwdegee/2017_eLife | 2_group.py | 1 | 10345 | #!/usr/bin/env python
# encoding: utf-8
"""
================================================
Created by Jan Willem de Gee on 2014-06-01.
Copyright (c) 2009 jwdegee. All rights reserved.
================================================
"""
import os, sys, datetime
import subprocess, logging
import scipy as sp
import scipy.stats as stats
import numpy as np
import matplotlib.pylab as pl
from IPython import embed as shell
this_raw_folder = '/home/raw_data/UvA/Donner_lab/2017_eLife/1_fMRI_yesno_visual/'
this_project_folder = '/home/shared/UvA/Niels_UvA/Visual_UvA2/'
analysisFolder = os.path.join(this_project_folder, 'analysis')
sys.path.append( analysisFolder )
sys.path.append( os.environ['ANALYSIS_HOME'] )
from Tools.Sessions import *
from Tools.Subjects.Subject import *
from Tools.Run import *
from Tools.Projects.Project import *
from defs_fmri_group import defs_fmri_group
import defs_pupil
# SUBJECTS:
# ---------
subjects = ['sub-01', 'sub-02', 'sub-03', 'sub-04', 'sub-05', 'sub-06', 'sub-07', 'sub-08', 'sub-09', 'sub-10', 'sub-11', 'sub-12', 'sub-13', 'sub-14']
nr_sessions = [2,2,2,3,2,2,2,2,2,2,2,2,2,2]
# PUPIL:
# ------
# pupilAnalysisSessionAcross = defs_pupil.pupilAnalysesAcross(subjects=subjects, experiment_name='pupil_yes_no', sample_rate_new=20, project_directory=this_project_folder)
# pupilAnalysisSessionAcross.behavior_choice()
# pupilAnalysisSessionAcross.behavior_normalized(prepare=False)
# pupilAnalysisSessionAcross.SDT_correlation(bins=5)
# pupilAnalysisSessionAcross.rt_distributions()
# pupilAnalysisSessionAcross.drift_diffusion()
# pupilAnalysisSessionAcross.average_pupil_responses()
# pupilAnalysisSessionAcross.grand_average_pupil_response()
# pupilAnalysisSessionAcross.SDT_across_time()
# pupilAnalysisSessionAcross.correlation_PPRa_BPD()
# pupilAnalysisSessionAcross.GLM_betas()
# fMRI:
# -----
for split_by in ['pupil_d',]:
# for split_by in ['yes',]:
fMRI_across = defs_fmri_group(subjects=subjects, nr_sessions=nr_sessions, base_dir=os.path.join(this_project_folder), split_by=split_by)
rois = [
'V1_center',
'V1_surround',
'V2_center',
'V2_surround',
'V3_center',
'V3_surround',
'lr_aIPS',
'lr_PCeS',
'lr_M1',
'sl_IPL',
'sl_SPL1',
'sl_SPL2',
'sl_pIns',
# 'S_intrapariet_and_P_trans',
# 'G_and_S_cingul-Mid-Ant',
# 'S_temporal_sup',
# 'G_precuneus',
# 'S_front_inf',
# 'S_orbital_med-olfact',
# cortex:
# 'Pole_occipital',
# 'cortex_dilated_mask',
# 'G_front_middle',
# 'G_and_S_cingul-Ant',
# 'S_circular_insula_ant',
# 'G_orbital',
# 'S_orbital-H_Shaped',
# 'S_orbital_lateral',
# 'G_front_inf-Orbital',
# 'S_orbital_med-olfact',
# # brainstem:
# 'LC_standard_2',
# 'LC_standard_1',
# 'mean_SN',
# 'mean_VTA',
# 'basal_forebrain_4',
# 'basal_forebrain_123',
# 'sup_col_jw',
# 'inf_col_jw',
# '4th_ventricle',
# 'LC_JW',
# 'LC_JW_nn',
# 'AAN_VTA',
# 'AAN_PAG',
# 'AAN_PBC',
# 'AAN_PO',
# 'AAN_PPN',
# 'AAN_LC',
# 'AAN_MR',
# 'AAN_MRF',
# 'AAN_DR',
]
# fMRI_across.surface_labels_to_vol()
# fMRI_across.correlation_per_subject(rois=rois, data_type='clean_False')
# fMRI_across.single_trial_correlation_ITI(data_type='clean_False')
# fMRI_across.single_trial_correlation(data_type='clean_4th_ventricle')
# fMRI_across.single_trial_correlation2(data_type='clean_False')
# fMRI_across.brainstem_to_behaviour(data_type='clean_4th_ventricle')
# fMRI_across.single_trial_multiple_regression(data_type='clean_4th_ventricle')
# fMRI_across.rates_across_trials(data_type='clean_False')
# fMRI_across.sequential_effects(data_type='clean_False')
# fMRI_across.correlation_bars()
# fMRI_across.correlation_bars_single_trial(rois=['superior_colliculus', 'basal_forebrain', 'mean_fullMB', 'LC_JW',], data_type='clean_4th_ventricle')
# fMRI_across.correlation_bars_binned_all_rois(bins=385, data_type='clean_False')
# fMRI_across.BRAINSTEM_correlation_bars()
# fMRI_across.BRAINSTEM_choice(data_type='clean_4th_ventricle')
# fMRI_across.BRAINSTEM_bar_plots(data_type='clean_4th_ventricle')
# fMRI_across.BRAINSTEM_correlation_matrix_single_trial(rois=['sup_col_jw', 'inf_col_jw', 'mean_SN', 'mean_VTA', 'LC_JW', 'basal_forebrain_123', 'basal_forebrain_4'], data_type='clean_4th_ventricle', partial=False)
# fMRI_across.correlation_matrix_single_trial_all_rois(data_type='clean_False', partial=False)
# fMRI_across.M1_connectivity(data_type='clean_False')
# fMRI_across.multivariate_localizer()
# fMRI_across.multivariate_task(data_type='clean_False')
# fMRI_across.ANOVA_LC()
bins_by = ['all', 'pupil']
bins = [385, 50]
partials = [True, False]
for b in bins:
# fMRI_across.correlation_bars_binned(bins=b, rois=['superior_colliculus', 'mean_SN', 'mean_VTA', 'LC_JW', 'basal_forebrain'], data_type='clean_4th_ventricle')
for bin_by in bins_by:
# for partial in partials:
# fMRI_across.correlation_matrix_binned(bin_by=bin_by, bins=b, partial=partial, rois=['G_and_S_cingul-Mid-Ant', 'superior_colliculus', 'basal_forebrain', 'mean_fullMB', 'LC_JW',], data_type='clean_4th_ventricle',)
pass
# fMRI_across.unpack_atlas()
# comparisons = ['pupil', 'pupil_b', 'yes', 'correct', 'present',]
# data_types = ['clean_False', 'clean_4th_ventricle', 'psc_False', 'psc_4th_ventricle']
# stratifications = [False]
stratifications = [False,]
# data_types = ['clean_4th_ventricle']
data_types = ['clean_False']
# data_types = ['psc_False']
for roi in rois:
for stratification in stratifications:
for data_type in data_types:
# fMRI_across.plot_mean_bars(roi=roi, data_type=data_type, type_response='mean',)
# fMRI_across.plot_event_average_all_trials(roi=roi, data_type=data_type, smooth=False, event_average=True, type_response='mean', stratification=stratification, project_out=False)
# fMRI_across.plot_event_average(roi=roi, data_type=data_type, smooth=False, event_average=True, type_response='mean', stratification=stratification, project_out=False)
pass
# fMRI_across.WHOLEBRAIN_event_related_average_prepare(data_type='clean_MNI', measure='mean',)
# fMRI_across.WHOLEBRAIN_event_related_average_conditions(data_type='clean_MNI', measure='mean',)
# fMRI_across.WHOLEBRAIN_correlation(data_type='clean_MNI', measure='mean', source='pupil_d')
# fMRI_across.WHOLEBRAIN_combine_searchlight(data_type='clean_MNI')
# fMRI_across.WHOLEBRAIN_lateralization_per_session(data_type='clean_MNI_smooth', measure='snr', prepare=False)
# fMRI_across.WHOLEBRAIN_event_related_average_plots(data_type='clean_MNI_smooth', measure='snr')
# fMRI_across.WHOLEBRAIN_correlation(data_type='clean_MNI_smooth', measure='mean', source='pupil_d')
# fMRI_across.WHOLEBRAIN_correlation(data_type='clean_MNI_smooth', measure='mean', source='V123')
# fMRI_across.WHOLEBRAIN_correlation(data_type='clean_MNI_smooth', measure='mean', source='V3')
# fMRI_across.WHOLEBRAIN_correlation(data_type='clean_MNI_smooth', measure='mean', source='V123_center_info')
# fMRI_across.WHOLEBRAIN_correlation(data_type='clean_MNI_smooth', measure='mean', source='pupil_criterion')
# fMRI_across.WHOLEBRAIN_correlation(data_type='clean_MNI_smooth', measure='mean', source='pupil_dprime')
# fMRI_across.WHOLEBRAIN_correlation(data_type='clean_MNI_smooth', measure='mean', source='BOLD_criterion')
# fMRI_across.WHOLEBRAIN_correlation(data_type='clean_MNI_smooth', measure='mean', source='BOLD_dprime')
# fMRI_across.WHOLEBRAIN_correlation(data_type='clean_MNI_smooth', measure='mean', source='BOLD_present')
# fMRI_across.WHOLEBRAIN_correlation(data_type='clean_MNI_smooth', measure='mean', source='BOLD_choice')
# fMRI_across.WHOLEBRAIN_correlation(data_type='clean_MNI_smooth', measure='mean', source='BOLD_moderated_mediation')
# fMRI_across.WHOLEBRAIN_correlation(data_type='clean_MNI_smooth', measure='mean', source='BOLD_PPI')
# fMRI_across.WHOLEBRAIN_correlation_per_session(data_type='clean_MNI_smooth', measure='mean', source='BOLD_choice')
# fMRI_across.WHOLEBRAIN_noise_correlation_make_dataframe(data_type='clean_False')
# fMRI_across.WHOLEBRAIN_noise_correlation(data_type='clean_False', partial=False)
# fMRI_across.WHOLEBRAIN_noise_correlation_bias(data_type='clean_False', partial=False)
# fMRI_across.mutual_inhibition(data_type='clean_MNI', measure='mean', prepare=False)
# fMRI_across.VISUAL_snr(data_type='clean_False')
# fMRI_across.VISUAL_noise_correlation(data_type='clean_False', partial=False)
# fMRI_across.MULTIVARIATE_make_lineplot(data_type='clean_MNI')
# fMRI_across.MULTIVARIATE_plot_patterns(data_type='clean_MNI')
# fMRI_across.MULTIVARIATE_make_dataframe(data_type='clean_MNI', prepare=False)
# fMRI_across.MULTIVARIATE_add_combined_signal()
# fMRI_across.CHOICE_SIGNALS_plots_2()
# fMRI_across.CHOICE_SIGNALS_stim_TPR_interactions()
# fMRI_across.CHOICE_SIGNALS_plots_stratified()
# fMRI_across.CHOICE_SIGNALS_SDT()
# fMRI_across.CHOICE_SIGNALS_logistic_regressions()
# fMRI_across.CHOICE_SIGNALS_choice_probability()
# fMRI_across.CHOICE_SIGNALS_choice_probability_plot()
# fMRI_across.CHOICE_SIGNALS_choice_probability_pupil_plot()
# fMRI_across.CHOICE_SIGNALS_ROC_curve()
# fMRI_across.CHOICE_SIGNALS_correlation_matrix()
# fMRI_across.CHOICE_SIGNALS_coupling_2()
# fMRI_across.CHOICE_SIGNALS_bars_to_criterion()
# fMRI_across.CHOICE_SIGNALS_behavioural_correlation()
# fMRI_across.CHOICE_SIGNALS_M1_correlation()
# fMRI_across.CHOICE_SIGNALS_PPI_analysis()
# fMRI_across.CHOICE_SIGNALS_mediation_analysis()
# fMRI_across.CHOICE_SIGNALS_to_choice()
# fMRI_across.CHOICE_SIGNALS_variability()
# fMRI_across.DDM_dataframe()
# fMRI_across.simulation()
| mit |
tectronics/ambhas | ambhas/krige.py | 3 | 8587 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 9 17:55:54 2011
@author: Sat Kumar Tomer
@website: www.ambhas.com
@email: [email protected]
"""
# import required modules
import numpy as np
import matplotlib.pylab as plt
class OK:
"""
This performs the ordinary kriging
Input:
x: x vector of location
        y: y vector of location
z: data vector at location (x,y)
Output:
None
Methods:
variogram: estimate the variogram
"""
def __init__(self,x,y,z):
self.x = x.flatten()
self.y = y.flatten()
self.z = z.flatten()
def variogram(self, var_type='averaged', n_lag=9):
"""
var_type: averaged or scattered
"""
x = self.x
y = self.y
z = self.z
# make the meshgrid
X1,X2 = np.meshgrid(x,x)
Y1,Y2 = np.meshgrid(y,y)
Z1,Z2 = np.meshgrid(z,z)
D = np.sqrt((X1 - X2)**2 + (Y1 - Y2)**2)
G = 0.5*(Z1 - Z2)**2
indx = range(len(z))
C,R = np.meshgrid(indx,indx)
G = G[R>C]
self.D = D
DI = D[R > C]
# group the variogram
# the group are formed based on the equal number of bin
total_n = len(DI)
group_n = int(total_n/n_lag)
sor_i = np.argsort(DI)[::-1]
DE = np.empty(n_lag)
GE = np.empty(n_lag)
for i in range(n_lag):
if i<n_lag-1:
DE[i] = DI[sor_i[group_n*i:group_n*(i+1)]].mean()
GE[i] = G[sor_i[group_n*i:group_n*(i+1)]].mean()
else:
DE[i] = DI[sor_i[group_n*i:]].mean()
GE[i] = G[sor_i[group_n*i:]].mean()
if var_type == 'scattered':
return DI,G
elif var_type == 'averaged':
return DE,GE
else:
            raise ValueError('var_type should be either averaged or scattered')
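    # Note: the averaged variogram groups point pairs into equal-count bins
    # (each of the n_lag bins holds roughly len(DI)/n_lag pairs) rather than
    # equal-width distance bins, so sparsely sampled large lags do not end up
    # in nearly empty bins.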
def vario_model(self, lags, model_par, model_type='linear'):
"""
Input:
model_type : the type of variogram model
spherical
linear
exponential
model_par: parameters of variogram model
this should be a dictionary
            e.g. for spherical and exponential
model_par = {'nugget':0, 'range':1, 'sill':1}
for linear
model_par = {'nugget':0, 'slope':1}
Output:
G: The fitted variogram model
"""
if model_type == 'spherical':
n = model_par['nugget']
r = model_par['range']
s = model_par['sill']
l = lags
G = n + (s*(1.5*l/r - 0.5*(l/r)**3)*(l<=r) + s*(l>r))
elif model_type == 'linear':
n = model_par['nugget']
s = model_par['slope']
l = lags
G = n + s*l
elif model_type == 'exponential':
n = model_par['nugget']
r = model_par['range']
s = model_par['sill']
l = lags
G = n + s*(1 - np.exp(-3*l/r))
else:
raise ValueError('model_type should be spherical or linear or exponential')
return G
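    # Illustrative sketch (not part of the original module): the model_par
    # values passed to vario_model() are usually obtained by fitting the
    # chosen model to the empirical variogram returned by variogram().
    # Assuming scipy is available, a least-squares fit of the exponential
    # model could look like this (the helper name and starting values are
    # hypothetical):
    #
    # from scipy.optimize import curve_fit
    #
    # def exp_model(l, nugget, range_, sill):
    #     return nugget + sill * (1 - np.exp(-3 * l / range_))
    #
    # DE, GE = ok.variogram()  # ok is an OK instance
    # popt, _ = curve_fit(exp_model, DE, GE, p0=[0.0, DE.mean(), GE.max()])
    # model_par = {'nugget': popt[0], 'range': popt[1], 'sill': popt[2]}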
def int_vario(self, Xg, Yg, model_par, model_type):
"""
this computes the integral of the variogram over a square
using the Monte Carlo integration method
this works only for two dimensional grid
Input:
Xg: x location where krigged data is required
            Yg: y location where krigged data is required
model_par: see the vario_model
model_type: see the vario_model
"""
avg_vario = np.empty((len(self.x), (len(Yg)-1)*(len(Xg)-1)))
for k in range(len(self.x)):
avg_vario_ens = np.empty((len(Yg)-1, len(Xg)-1))
for i in range(len(Yg)-1):
for j in range(len(Xg)-1):
Xg_rand = Xg[j]+np.random.rand(10)*(Xg[j+1]-Xg[j])
Yg_rand = Yg[i]+np.random.rand(10)*(Yg[i+1]-Yg[i])
DOR = ((self.x[k] - Xg_rand)**2 + (self.y[k] - Yg_rand)**2)**0.5
avg_vario_ens[i,j] = self.vario_model(DOR, model_par, model_type).mean()
avg_vario[k,:] = avg_vario_ens.flatten()
return avg_vario
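    # Note: the Monte Carlo integration above draws 10 random points inside
    # each grid cell and averages the point-to-cell variogram over them;
    # increasing that sample size gives smoother block-kriging weights at the
    # cost of run time.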
def krige(self, Xg, Yg, model_par, model_type):
"""
Input:
Xg: x location where krigged data is required
            Yg: y location where krigged data is required
model_par: see the vario_model
model_type: see the vario_model
Attributes:
self.Zg : krigged data
self.s2_k = variance in the data
"""
# set up the Gmod matrix
n = len(self.x)
Gmod = np.empty((n+1,n+1))
Gmod[:n, :n] = self.vario_model(self.D, model_par, model_type)
Gmod[:,n] = 1
Gmod[n,:] = 1
Gmod[n,n] = 0
Gmod = np.matrix(Gmod)
# inverse of Gmod
Ginv = Gmod.I
Xg = Xg.flatten()
Yg = Yg.flatten()
Zg = np.empty(Xg.shape)
s2_k = np.empty(Xg.shape)
for k in range(len(Xg)):
DOR = ((self.x - Xg[k])**2 + (self.y - Yg[k])**2)**0.5
GR = np.empty((n+1,1))
GR[:n,0] = self.vario_model(DOR, model_par, model_type)
GR[n,0] = 1
E = np.array(Ginv * GR )
Zg[k] = np.sum(E[:n,0]*self.z)
s2_k[k] = np.sum(E[:n,0]*GR[:n,0])+ E[n, 0]
self.Zg = Zg
self.s2_k = s2_k
def block_krige(self, Xg, Yg, model_par, model_type):
"""
Input:
Xg: x location where krigged data is required
            Yg: y location where krigged data is required
model_par: see the vario_model
model_type: see the vario_model
Attributes:
self.Zg : krigged data
self.s2_k = variance in the data
"""
# set up the Gmod matrix
n = len(self.x)
Gmod = np.empty((n+1,n+1))
Gmod[:n, :n] = self.vario_model(self.D, model_par, model_type)
Gmod[:,n] = 1
Gmod[n,:] = 1
Gmod[n,n] = 0
Gmod = np.matrix(Gmod)
# inverse of Gmod
Ginv = Gmod.I
Xg = Xg.flatten()
Yg = Yg.flatten()
avg_vario = self.int_vario(Xg, Yg, model_par, model_type)
Zg = np.empty(avg_vario.shape[1])
s2_k = np.empty(avg_vario.shape[1])
for k in range(avg_vario.shape[1]):
GR = np.empty((n+1,1))
GR[:n,0] = avg_vario[:,k]
GR[n,0] = 1
E = np.array(Ginv * GR )
Zg[k] = np.sum(E[:n,0]*self.z)
s2_k[k] = np.sum(E[:n,0]*GR[:n,0])+ E[n, 0]
self.Zg = Zg.reshape(len(Yg)-1, len(Xg)-1)
self.s2_k = s2_k.reshape(len(Yg)-1, len(Xg)-1)
if __name__ == "__main__":
    # generate some synthetic data
x = np.random.rand(20)
y = np.random.rand(20)
z = 0.0*np.random.normal(size=20)+x+y
z[1] = np.NAN
foo = OK(x,y,z)
#ax,ay = foo.variogram('scattered')
ax,ay = foo.variogram()
plt.plot(ax,ay,'ro')
lags = np.linspace(0,5)
model_par = {}
model_par['nugget'] = 0
model_par['range'] = 1
model_par['sill'] = 2.0
G = foo.vario_model(lags, model_par, model_type = 'exponential')
plt.plot(lags, G, 'k')
plt.show()
Rx = np.linspace(-1,1,250)
Ry = np.linspace(0,1,150)
XI,YI = np.meshgrid(Rx,Ry)
foo.krige(XI, YI, model_par, 'exponential')
plt.matshow(foo.Zg.reshape(150,250))
plt.show()
print('Processing over')
# # block kriging
# xg = np.linspace(0,1,5)
# yg = np.linspace(0,1,8)
# foo.block_krige(xg, yg, model_par, model_type = 'exponential')
# plt.imshow(foo.s2_k, extent=(0,1,0,1))
# plt.imshow(foo.Zg, extent=(0,1,0,1))
# plt.matshow(foo.Zg)
# plt.matshow(foo.s2_k)
# plt.colorbar()
# plt.plot(x,y, 'ro')
# plt.show() | lgpl-2.1 |
jeremyfix/pylearn2 | pylearn2/train_extensions/roc_auc.py | 15 | 4888 | """
TrainExtension subclass for calculating ROC AUC scores on monitoring
dataset(s), reported via monitor channels.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
__maintainer__ = "Steven Kearnes"
import numpy as np
try:
from sklearn.metrics import roc_auc_score
except ImportError:
roc_auc_score = None
import theano
from theano import gof, config
from theano import tensor as T
from pylearn2.train_extensions import TrainExtension
class RocAucScoreOp(gof.Op):
"""
Theano Op wrapping sklearn.metrics.roc_auc_score.
Parameters
----------
name : str, optional (default 'roc_auc')
Name of this Op.
    use_c_code : str, optional
        Passed to the ``theano.gof.Op`` constructor; defaults to
        ``theano.config.cxx``.
"""
def __init__(self, name='roc_auc', use_c_code=theano.config.cxx):
super(RocAucScoreOp, self).__init__(use_c_code)
self.name = name
def make_node(self, y_true, y_score):
"""
Calculate ROC AUC score.
Parameters
----------
y_true : tensor_like
Target class labels.
y_score : tensor_like
Predicted class labels or probabilities for positive class.
"""
y_true = T.as_tensor_variable(y_true)
y_score = T.as_tensor_variable(y_score)
output = [T.scalar(name=self.name, dtype=config.floatX)]
return gof.Apply(self, [y_true, y_score], output)
def perform(self, node, inputs, output_storage):
"""
Calculate ROC AUC score.
Parameters
----------
node : Apply instance
Symbolic inputs and outputs.
inputs : list
Sequence of inputs.
output_storage : list
List of mutable 1-element lists.
"""
if roc_auc_score is None:
raise RuntimeError("Could not import from sklearn.")
y_true, y_score = inputs
try:
roc_auc = roc_auc_score(y_true, y_score)
except ValueError:
roc_auc = np.nan
output_storage[0][0] = theano._asarray(roc_auc, dtype=config.floatX)
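# Note: perform() maps the ValueError raised by sklearn's roc_auc_score when
# y_true contains a single class to np.nan, which is why the channel defined
# below documents that it can return nan on batches where only one class is
# represented.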
class RocAucChannel(TrainExtension):
"""
Adds a ROC AUC channel to the monitor for each monitoring dataset.
This monitor will return nan unless both classes are represented in
y_true. For this reason, it is recommended to set monitoring_batches
to 1, especially when using unbalanced datasets.
Parameters
----------
channel_name_suffix : str, optional (default 'roc_auc')
Channel name suffix.
positive_class_index : int, optional (default 1)
Index of positive class in predicted values.
negative_class_index : int or None, optional (default None)
Index of negative class in predicted values for calculation of
one vs. one performance. If None, uses all examples not in the
positive class (one vs. the rest).
"""
def __init__(self, channel_name_suffix='roc_auc', positive_class_index=1,
negative_class_index=None):
self.channel_name_suffix = channel_name_suffix
self.positive_class_index = positive_class_index
self.negative_class_index = negative_class_index
def setup(self, model, dataset, algorithm):
"""
Add ROC AUC channels for monitoring dataset(s) to model.monitor.
Parameters
----------
model : object
The model being trained.
dataset : object
Training dataset.
algorithm : object
Training algorithm.
"""
m_space, m_source = model.get_monitoring_data_specs()
state, target = m_space.make_theano_batch()
y = T.argmax(target, axis=1)
y_hat = model.fprop(state)[:, self.positive_class_index]
# one vs. the rest
if self.negative_class_index is None:
y = T.eq(y, self.positive_class_index)
# one vs. one
else:
pos = T.eq(y, self.positive_class_index)
neg = T.eq(y, self.negative_class_index)
keep = T.add(pos, neg).nonzero()
y = T.eq(y[keep], self.positive_class_index)
y_hat = y_hat[keep]
roc_auc = RocAucScoreOp(self.channel_name_suffix)(y, y_hat)
roc_auc = T.cast(roc_auc, config.floatX)
for dataset_name, dataset in algorithm.monitoring_dataset.items():
if dataset_name:
channel_name = '{0}_{1}'.format(dataset_name,
self.channel_name_suffix)
else:
channel_name = self.channel_name_suffix
model.monitor.add_channel(name=channel_name,
ipt=(state, target),
val=roc_auc,
data_specs=(m_space, m_source),
dataset=dataset)
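# Usage sketch (an assumption based on how pylearn2 TrainExtensions are
# normally attached, not something defined in this file): the channel is
# added through the `extensions` list of a Train object, e.g. in YAML:
#
# extensions: [
#     !obj:pylearn2.train_extensions.roc_auc.RocAucChannel {},
# ]
#
# After setup, the monitor exposes one channel per monitoring dataset, named
# '<dataset_name>_roc_auc' (or just 'roc_auc' for an unnamed dataset).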
| bsd-3-clause |
AndrewRook/machine_learning | figure_5-24.py | 1 | 6544 | # Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy.special import gamma
from scipy.stats import norm
from sklearn.neighbors import BallTree
from astroML.density_estimation import GaussianMixture1D
from astroML.plotting import plot_mcmc,hist
# hack to fix an import issue in older versions of pymc
import scipy
scipy.derivative = scipy.misc.derivative
import pymc
def get_logp(S, model):
"""compute log(p) given a pyMC model"""
M = pymc.MAP(model)
traces = np.array([S.trace(s)[:] for s in S.stochastics])
logp = np.zeros(traces.shape[1])
for i in range(len(logp)):
logp[i] = -M.func(traces[:, i])
return logp
def estimate_bayes_factor(traces, logp, r=0.05, return_list=False):
"""Estimate the bayes factor using the local density of points"""
D, N = traces.shape
# compute volume of a D-dimensional sphere of radius r
Vr = np.pi ** (0.5 * D) / gamma(0.5 * D + 1) * (r ** D)
# use neighbor count within r as a density estimator
bt = BallTree(traces.T)
count = bt.query_radius(traces.T, r=r, count_only=True)
BF = logp + np.log(N) + np.log(Vr) - np.log(count)
if return_list:
return BF
else:
p25, p50, p75 = np.percentile(BF, [25, 50, 75])
return p50, 0.7413 * (p75 - p25)
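# Note on the spread returned above: for a normal distribution the
# inter-quartile range is IQR = 2 * 0.6745 * sigma ~ 1.349 * sigma, so
# sigma ~ IQR / 1.349 ~ 0.7413 * (p75 - p25); the second return value is
# therefore a robust sigma estimate of the log Bayes factor samples.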
#------------------------------------------------------------
# Generate the data
mu1_in = 0
sigma1_in = 0.3
mu2_in = 1
sigma2_in = 0.3#1
ratio_in = 1.5
N = 200
np.random.seed(10)
gm = GaussianMixture1D([mu1_in, mu2_in],
[sigma1_in, sigma2_in],
[ratio_in, 1])
x_sample = gm.sample(N)
#------------------------------------------------------------
# Set up pyMC model: single gaussian
# 2 parameters: (mu, sigma)
M1_mu = pymc.Uniform('M1_mu', -5, 5, value=0)
M1_log_sigma = pymc.Uniform('M1_log_sigma', -10, 10, value=0)
@pymc.deterministic
def M1_sigma(M1_log_sigma=M1_log_sigma):
return np.exp(M1_log_sigma)
@pymc.deterministic
def M1_tau(M1_sigma=M1_sigma):
return 1. / M1_sigma ** 2
M1 = pymc.Normal('M1', M1_mu, M1_tau, observed=True, value=x_sample)
model1 = dict(M1_mu=M1_mu, M1_log_sigma=M1_log_sigma,
M1_sigma=M1_sigma,
M1_tau=M1_tau, M1=M1)
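# Note: model 1 has two free parameters (M1_mu and M1_log_sigma); M1_sigma
# and M1_tau are deterministic transforms of the latter, with tau = 1/sigma**2
# being the precision parametrisation expected by pymc.Normal.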
#------------------------------------------------------------
# Set up pyMC model: double gaussian
# 5 parameters: (mu1, mu2, sigma1, sigma2, ratio)
def doublegauss_like(x, mu1, mu2, sigma1, sigma2, ratio):
"""log-likelihood for double gaussian"""
r1 = ratio / (1. + ratio)
r2 = 1 - r1
L = r1 * norm(mu1, sigma1).pdf(x) + r2 * norm(mu2, sigma2).pdf(x)
L[L == 0] = 1E-16 # prevent divide-by-zero error
logL = np.log(L).sum()
if np.isinf(logL):
raise pymc.ZeroProbability
else:
return logL
def rdoublegauss(mu1, mu2, sigma1, sigma2, ratio, size=None):
"""random variable from double gaussian"""
r1 = ratio / (1. + ratio)
r2 = 1 - r1
R = np.asarray(np.random.random(size))
Rshape = R.shape
    R = np.atleast_1d(R)
mask1 = (R < r1)
mask2 = ~mask1
N1 = mask1.sum()
N2 = R.size - N1
R[mask1] = norm(mu1, sigma1).rvs(N1)
R[mask2] = norm(mu2, sigma2).rvs(N2)
return R.reshape(Rshape)
DoubleGauss = pymc.stochastic_from_dist('doublegauss',
logp=doublegauss_like,
random=rdoublegauss,
dtype=np.float,
mv=True)
# set up our Stochastic variables, mu1, mu2, sigma1, sigma2, ratio
M2_mu1 = pymc.Uniform('M2_mu1', -5, 5, value=0)
M2_mu2 = pymc.Uniform('M2_mu2', -5, 5, value=1)
M2_log_sigma1 = pymc.Uniform('M2_log_sigma1', -10, 10, value=0)
M2_log_sigma2 = pymc.Uniform('M2_log_sigma2', -10, 10, value=0)
@pymc.deterministic
def M2_sigma1(M2_log_sigma1=M2_log_sigma1):
return np.exp(M2_log_sigma1)
@pymc.deterministic
def M2_sigma2(M2_log_sigma2=M2_log_sigma2):
return np.exp(M2_log_sigma2)
M2_ratio = pymc.Uniform('M2_ratio', 1E-3, 1E3, value=1)
M2 = DoubleGauss('M2', M2_mu1, M2_mu2, M2_sigma1, M2_sigma2, M2_ratio,
observed=True, value=x_sample)
model2 = dict(M2_mu1=M2_mu1, M2_mu2=M2_mu2,
M2_log_sigma1=M2_log_sigma1, M2_log_sigma2=M2_log_sigma2,
M2_sigma1=M2_sigma1, M2_sigma2=M2_sigma2,
M2_ratio=M2_ratio, M2=M2)
#------------------------------------------------------------
# Set up MCMC sampling
def compute_MCMC_models(Niter=10000, burn=1000, rseed=0):
pymc.numpy.random.seed(rseed)
S1 = pymc.MCMC(model1)
S1.sample(iter=Niter, burn=burn)
trace1 = np.vstack([S1.trace('M1_mu')[:],
S1.trace('M1_sigma')[:]])
logp1 = get_logp(S1, model1)
S2 = pymc.MCMC(model2)
S2.sample(iter=Niter, burn=burn)
trace2 = np.vstack([S2.trace('M2_mu1')[:],
S2.trace('M2_mu2')[:],
S2.trace('M2_sigma1')[:],
S2.trace('M2_sigma2')[:],
S2.trace('M2_ratio')[:]])
logp2 = get_logp(S2, model2)
return trace1, logp1, trace2, logp2
trace1, logp1, trace2, logp2 = compute_MCMC_models()
#------------------------------------------------------------
# Compute Odds ratio with density estimation technique
BF1, dBF1 = estimate_bayes_factor(trace1, logp1, r=0.02)
BF1_list = estimate_bayes_factor(trace1,logp1,r=0.02,return_list = True)
BF2, dBF2 = estimate_bayes_factor(trace2, logp2, r=0.05)
BF2_list = estimate_bayes_factor(trace2,logp2,r=0.05,return_list = True)
print "Bayes Factor (Single Gaussian): Median = {0:.3f}, p75-p25 = {1:.3f}".format(BF1,dBF1)
print "Bayes Factor (Double Gaussian): Median = {0:.3f}, p75-p25 = {1:.3f}".format(BF2,dBF2)
print np.sum(BF1_list),np.sum(BF2_list)
BF1_list_plot = BF1_list[(BF1_list >= BF1-1.*dBF1) & (BF1_list <= BF1+1.*dBF1)]
BF2_list_plot = BF2_list[(BF2_list >= BF2-1.*dBF2) & (BF2_list <= BF2+1.*dBF2)]
ax = plt.figure().add_subplot(111)
hist(BF1_list_plot,bins='knuth',ax=ax,normed=True,color='red',alpha=0.25)
hist(BF2_list_plot,bins='knuth',ax=ax,normed=True,color='green',alpha=0.25)
ax.figure.savefig('figure_5-24_BFhist.png',dpi=300)
| mit |
rusenask/stubo-app | stubo/ext/parse_date.py | 1 | 6451 | """
:copyright: (c) 2015 by OpenCredo.
:license: GPLv3, see LICENSE for more details.
"""
# adapted from https://github.com/pydata/pandas/blob/master/pandas/tseries/tools.py
from datetime import datetime, timedelta
import re
import sys
from StringIO import StringIO
import logging
import dateutil
from dateutil.parser import parse, DEFAULTPARSER
log = logging.getLogger(__name__)
# raise exception if dateutil 2.0 install on 2.x platform
if (sys.version_info[0] == 2 and
dateutil.__version__ == '2.0'): # pragma: no cover
raise Exception('dateutil 2.0 incompatible with Python 2.x, you must '
'install version 1.5 or 2.1+!')
# otherwise a 2nd import won't show the message
_DATEUTIL_LEXER_SPLIT = None
try:
# Since these are private methods from dateutil, it is safely imported
# here so in case this interface changes, pandas will just fallback
# to not using the functionality
from dateutil.parser import _timelex
if hasattr(_timelex, 'split'):
def _lexer_split_from_str(dt_str):
# The StringIO(str(_)) is for dateutil 2.2 compatibility
return _timelex.split(StringIO(str(dt_str)))
_DATEUTIL_LEXER_SPLIT = _lexer_split_from_str
except (ImportError, AttributeError):
pass
def _guess_datetime_format(dt_str, parsed_datetime, dayfirst=True,
dt_str_split=_DATEUTIL_LEXER_SPLIT):
"""
Guess the datetime format of a given datetime string.
Parameters
----------
dt_str : string, datetime string to guess the format of
parsed_datetime : result of dateutil.parser.parse
dayfirst : boolean, default True
If True parses dates with the day first, eg 20/01/2005
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug).
dt_str_split : function, defaults to `_DATEUTIL_LEXER_SPLIT` (dateutil)
This function should take in a datetime string and return
a list of strings, the guess of the various specific parts
e.g. '2011/12/30' -> ['2011', '/', '12', '/', '30']
Returns
-------
ret : datetime format string (for `strftime` or `strptime`)
"""
log.debug('_guess_datetime_format, dt_str={0}'.format(dt_str))
if dt_str_split is None:
return None
if not isinstance(dt_str, basestring):
return None
day_attribute_and_format = (('day',), '%d')
datetime_attrs_to_format = [
(('year', 'month', 'day'), '%Y%m%d'),
(('year',), '%Y'),
(('month',), '%B'),
(('month',), '%b'),
(('month',), '%m'),
day_attribute_and_format,
(('hour',), '%H'),
(('minute',), '%M'),
(('second',), '%S'),
(('microsecond',), '%f'),
(('second', 'microsecond'), '%S.%f'),
]
if dayfirst:
datetime_attrs_to_format.remove(day_attribute_and_format)
datetime_attrs_to_format.insert(0, day_attribute_and_format)
if parsed_datetime is None:
return None
try:
log.debug('dt_str_split(dt_str)')
tokens = dt_str_split(dt_str)
except:
# In case the datetime string can't be split, its format cannot
# be guessed
return None
log.debug('split tokens={0}'.format(tokens))
format_guess = [None] * len(tokens)
found_attrs = set()
for attrs, attr_format in datetime_attrs_to_format:
# If a given attribute has been placed in the format string, skip
# over other formats for that same underlying attribute (IE, month
# can be represented in multiple different ways)
if set(attrs) & found_attrs:
continue
if all(getattr(parsed_datetime, attr) is not None for attr in attrs):
for i, token_format in enumerate(format_guess):
if (token_format is None and
tokens[i] == parsed_datetime.strftime(attr_format)):
format_guess[i] = attr_format
found_attrs.update(attrs)
break
log.debug('found_attrs={0}'.format(found_attrs))
log.debug('format_guess={0}'.format(format_guess))
# Only consider it a valid guess if we have a year, month and day
if len(set(['year', 'month', 'day']) & found_attrs) != 3:
return None
output_format = []
for i, guess in enumerate(format_guess):
if guess is not None:
# Either fill in the format placeholder (like %Y)
output_format.append(guess)
else:
# Or just the token separate (IE, the dashes in "01-01-2013")
try:
# If the token is numeric, then we likely didn't parse it
# properly, so our guess is wrong
if float(tokens[i]) != 0.0:
return None
except ValueError:
pass
output_format.append(tokens[i])
guessed_format = ''.join(output_format)
if parsed_datetime.strftime(guessed_format) == dt_str:
return guessed_format
has_time = re.compile('(.+)([\s]|T)+(.+)')
def parse_date_string(date_str, dayfirst=True, yearfirst=True):
"""
Try hard to parse datetime string, leveraging dateutil plus some extras
Parameters
----------
    date_str : str, the date string to parse
    dayfirst : bool
    yearfirst : bool
Returns
-------
datetime, datetime format string (for `strftime` or `strptime`)
or None if unable parse date str
"""
if not isinstance(date_str, basestring):
return None
arg = date_str.upper()
parse_info = DEFAULTPARSER.info
if len(arg) in (7, 8):
mresult = _attempt_monthly(arg)
log.debug('mresult={0}'.format(mresult))
if mresult:
return mresult
parsed_datetime = DEFAULTPARSER.parse(StringIO(str(arg)), dayfirst=dayfirst,
yearfirst=yearfirst, fuzzy=True)
log.debug('parsed_datetime={0}'.format(parsed_datetime))
if parsed_datetime:
date_format = _guess_datetime_format(date_str, parsed_datetime,
dayfirst=dayfirst)
return parsed_datetime, date_format
def _attempt_monthly(val):
pats = ['%Y-%m', '%m-%Y', '%b %Y', '%b-%Y']
for pat in pats:
try:
ret = datetime.strptime(val, pat)
return ret, pat
except Exception:
pass
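# Illustrative usage (a sketch; the exact behaviour depends on the installed
# dateutil version):
#
# >>> parse_date_string('2011/12/30')
# (datetime.datetime(2011, 12, 30, 0, 0), '%Y/%m/%d')
#
# The returned format string can be fed back into datetime.strptime/strftime;
# per the docstring, None is returned when the input cannot be parsed.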
| gpl-3.0 |
h2educ/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 35 | 11709 | try:
# Python 2 compat
reload
except NameError:
# Regular Python 3+ import
from importlib import reload
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
# Test that priors passed as a list are correctly handled (run to see if
# failure)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
clf.fit(X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
# Test if the sum of the normalized eigen vectors values equals 1
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
# Assure that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariances=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_deprecated_lda_qda_deprecation():
def import_lda_module():
import sklearn.lda
# ensure that we trigger DeprecationWarning even if the sklearn.lda
# was loaded previously by another test.
reload(sklearn.lda)
return sklearn.lda
lda = assert_warns(DeprecationWarning, import_lda_module)
assert lda.LDA is LinearDiscriminantAnalysis
def import_qda_module():
import sklearn.qda
# ensure that we trigger DeprecationWarning even if the sklearn.qda
# was loaded previously by another test.
reload(sklearn.qda)
return sklearn.qda
qda = assert_warns(DeprecationWarning, import_qda_module)
assert qda.QDA is QuadraticDiscriminantAnalysis
| bsd-3-clause |
jreback/pandas | pandas/tests/frame/test_logical_ops.py | 2 | 6147 | import operator
import re
import numpy as np
import pytest
from pandas import CategoricalIndex, DataFrame, Interval, Series, isnull
import pandas._testing as tm
class TestDataFrameLogicalOperators:
# &, |, ^
@pytest.mark.parametrize(
"left, right, op, expected",
[
(
[True, False, np.nan],
[True, False, True],
operator.and_,
[True, False, False],
),
(
[True, False, True],
[True, False, np.nan],
operator.and_,
[True, False, False],
),
(
[True, False, np.nan],
[True, False, True],
operator.or_,
[True, False, False],
),
(
[True, False, True],
[True, False, np.nan],
operator.or_,
[True, False, True],
),
],
)
def test_logical_operators_nans(self, left, right, op, expected, frame_or_series):
# GH#13896
result = op(frame_or_series(left), frame_or_series(right))
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_logical_ops_empty_frame(self):
# GH#5808
# empty frames, non-mixed dtype
df = DataFrame(index=[1])
result = df & df
tm.assert_frame_equal(result, df)
result = df | df
tm.assert_frame_equal(result, df)
df2 = DataFrame(index=[1, 2])
result = df & df2
tm.assert_frame_equal(result, df2)
dfa = DataFrame(index=[1], columns=["A"])
result = dfa & dfa
expected = DataFrame(False, index=[1], columns=["A"])
tm.assert_frame_equal(result, expected)
def test_logical_ops_bool_frame(self):
# GH#5808
df1a_bool = DataFrame(True, index=[1], columns=["A"])
result = df1a_bool & df1a_bool
tm.assert_frame_equal(result, df1a_bool)
result = df1a_bool | df1a_bool
tm.assert_frame_equal(result, df1a_bool)
def test_logical_ops_int_frame(self):
# GH#5808
df1a_int = DataFrame(1, index=[1], columns=["A"])
df1a_bool = DataFrame(True, index=[1], columns=["A"])
result = df1a_int | df1a_bool
tm.assert_frame_equal(result, df1a_bool)
# Check that this matches Series behavior
res_ser = df1a_int["A"] | df1a_bool["A"]
tm.assert_series_equal(res_ser, df1a_bool["A"])
def test_logical_ops_invalid(self):
# GH#5808
df1 = DataFrame(1.0, index=[1], columns=["A"])
df2 = DataFrame(True, index=[1], columns=["A"])
msg = re.escape("unsupported operand type(s) for |: 'float' and 'bool'")
with pytest.raises(TypeError, match=msg):
df1 | df2
df1 = DataFrame("foo", index=[1], columns=["A"])
df2 = DataFrame(True, index=[1], columns=["A"])
msg = re.escape("unsupported operand type(s) for |: 'str' and 'bool'")
with pytest.raises(TypeError, match=msg):
df1 | df2
def test_logical_operators(self):
def _check_bin_op(op):
result = op(df1, df2)
expected = DataFrame(
op(df1.values, df2.values), index=df1.index, columns=df1.columns
)
assert result.values.dtype == np.bool_
tm.assert_frame_equal(result, expected)
def _check_unary_op(op):
result = op(df1)
expected = DataFrame(op(df1.values), index=df1.index, columns=df1.columns)
assert result.values.dtype == np.bool_
tm.assert_frame_equal(result, expected)
df1 = {
"a": {"a": True, "b": False, "c": False, "d": True, "e": True},
"b": {"a": False, "b": True, "c": False, "d": False, "e": False},
"c": {"a": False, "b": False, "c": True, "d": False, "e": False},
"d": {"a": True, "b": False, "c": False, "d": True, "e": True},
"e": {"a": True, "b": False, "c": False, "d": True, "e": True},
}
df2 = {
"a": {"a": True, "b": False, "c": True, "d": False, "e": False},
"b": {"a": False, "b": True, "c": False, "d": False, "e": False},
"c": {"a": True, "b": False, "c": True, "d": False, "e": False},
"d": {"a": False, "b": False, "c": False, "d": True, "e": False},
"e": {"a": False, "b": False, "c": False, "d": False, "e": True},
}
df1 = DataFrame(df1)
df2 = DataFrame(df2)
_check_bin_op(operator.and_)
_check_bin_op(operator.or_)
_check_bin_op(operator.xor)
_check_unary_op(operator.inv) # TODO: belongs elsewhere
def test_logical_with_nas(self):
d = DataFrame({"a": [np.nan, False], "b": [True, True]})
# GH4947
# bool comparisons should return bool
result = d["a"] | d["b"]
expected = Series([False, True])
tm.assert_series_equal(result, expected)
# GH4604, automatic casting here
result = d["a"].fillna(False) | d["b"]
expected = Series([True, True])
tm.assert_series_equal(result, expected)
result = d["a"].fillna(False, downcast=False) | d["b"]
expected = Series([True, True])
tm.assert_series_equal(result, expected)
def test_logical_ops_categorical_columns(self):
# GH#38367
intervals = [Interval(1, 2), Interval(3, 4)]
data = DataFrame(
[[1, np.nan], [2, np.nan]],
columns=CategoricalIndex(
intervals, categories=intervals + [Interval(5, 6)]
),
)
mask = DataFrame(
[[False, False], [False, False]], columns=data.columns, dtype=bool
)
result = mask | isnull(data)
expected = DataFrame(
[[False, True], [False, True]],
columns=CategoricalIndex(
intervals, categories=intervals + [Interval(5, 6)]
),
)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
atsiaras/transit_simulator | transit_simulator/__run__.py | 1 | 16925 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
from tkinter import *
import tkinter.ttk as ttk
import tkinter.filedialog as tkFileDialog
from tkinter.messagebox import *
else:
import ttk
from Tkinter import *
import tkFileDialog
from tkMessageBox import *
import warnings
warnings.filterwarnings(
'ignore', message='Matplotlib is building the font cache using fc-list. This may take a moment.')
warnings.filterwarnings(
'ignore', message='The installed version of numexpr 2.4.4 is not supported in pandas and will be not be used')
import matplotlib
matplotlib.use('TkAgg')
import os
import numpy as np
import pylightcurve as plc
from matplotlib.backends.backend_tkagg import FigureCanvasBase, FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.backend_bases import key_press_handler, MouseEvent
def initialise_window(window, window_name, windows_to_hide, windows_to_close, exit_python):
def exit_command():
for i in windows_to_close:
i.destroy()
for i in windows_to_hide:
i.withdraw()
if exit_python:
os._exit(-1)
window.wm_title(window_name)
window.protocol('WM_DELETE_WINDOW', exit_command)
window.withdraw()
def setup_window(window, objects, main_font=None, button_font=None, entries_bd=3):
if button_font is None:
button_font = ['times', 15, 'bold']
if main_font is None:
main_font = ['times', 15]
for row in range(len(objects)):
if len(objects[row]) == 0:
label_empty = Label(window, text='')
label_empty.grid(row=row, column=100)
else:
for obj in objects[row]:
if obj[0].winfo_class() == 'Button':
obj[0].configure(font=button_font)
elif obj[0].winfo_class() == 'Entry':
obj[0].configure(bd=entries_bd, font=main_font)
elif obj[0].winfo_class() in ['Label', 'Radiobutton']:
obj[0].configure(font=main_font)
if len(obj) == 4:
obj[0].grid(row=row, column=obj[1], columnspan=obj[2], rowspan=obj[3])
elif len(obj) == 3:
obj[0].grid(row=row, column=obj[1], columnspan=obj[2])
else:
obj[0].grid(row=row, column=obj[1])
def finalise_window(window, position=5, topmost=False):
window.update_idletasks()
if position == 1:
x = 0
y = 0
elif position == 2:
x = (window.winfo_screenwidth() - window.winfo_reqwidth()) / 2
y = 0
elif position == 3:
x = window.winfo_screenwidth() - window.winfo_reqwidth()
y = 0
elif position == 4:
x = 0
y = (window.winfo_screenheight() - window.winfo_reqheight()) / 2
elif position == 5:
x = (window.winfo_screenwidth() - window.winfo_reqwidth()) / 2
y = (window.winfo_screenheight() - window.winfo_reqheight()) / 2
elif position == 6:
x = window.winfo_screenwidth() - window.winfo_reqwidth()
y = (window.winfo_screenheight() - window.winfo_reqheight()) / 2
elif position == 7:
x = 0
y = window.winfo_screenheight() - window.winfo_reqheight()
elif position == 8:
x = (window.winfo_screenwidth() - window.winfo_reqwidth()) / 2
y = window.winfo_screenheight() - window.winfo_reqheight()
elif position == 9:
x = window.winfo_screenwidth() - window.winfo_reqwidth()
y = window.winfo_screenheight() - window.winfo_reqheight()
else:
x = 0
y = 0
window.geometry('+%d+%d' % (x, y))
window.update_idletasks()
window.lift()
window.wm_attributes("-topmost", 1)
if not topmost:
window.after_idle(window.attributes, '-topmost', 0)
window.deiconify()
def test_float_positive_input(input_str, typing):
if typing == '1':
try:
if float(input_str) >= 0:
return True
else:
return False
except ValueError:
return False
else:
return True
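# Note: when registered as a Tk validatecommand with ('%P', '%d'), input_str
# receives the proposed entry text and typing the Tk action code ('1' means
# an insertion), so the check above only vets values while characters are
# being typed and always accepts deletions.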
def run_app():
# #########
# create and initialise the window
# #########
root = Tk()
root2 = Tk()
initialise_window(root, 'Transit simulator', [], [root, root2], False)
initialise_window(root2, 'Transit simulator', [root2], [], False)
# get variables from log and set as tk variables those to be modified
catalogue = plc.oec_catalogue()
planet_search = StringVar(value='HD 209458 b')
planet = StringVar(value='HD 209458 b')
metallicity = DoubleVar(value=0.0)
temperature = DoubleVar(value=0.0)
logg = DoubleVar(value=0.0)
phot_filter = IntVar(value=7)
period = DoubleVar(value=0.0)
rp_over_rs = DoubleVar(value=0.0)
sma_over_rs = DoubleVar(value=0.0)
inclination = DoubleVar(value=0.0)
eccentricity = DoubleVar(value=0.0)
periastron = DoubleVar(value=0.0)
ascending_node = DoubleVar(value=0.0)
# set progress variables, useful for updating the window
update_planet = BooleanVar(root, value=True)
update_planet_list = BooleanVar(root, value=True)
open_root2 = BooleanVar(root, value=False)
# create the plot in the additional window
figure = matplotlib.figure.Figure()
figure.patch.set_facecolor('white')
ax1 = figure.add_subplot(122)
ax2 = figure.add_subplot(221)
ax3 = figure.add_subplot(223)
canvas = FigureCanvasTkAgg(figure, root2)
canvas.get_tk_widget().pack()
NavigationToolbar2TkAgg(canvas, root2)
# create widgets
metallicity_label = Label(root, text='Stellar metallicity (dex)')
metallicity_entry = Scale(root, from_=-5, to=1, resolution=0.5, variable=metallicity, orient=HORIZONTAL)
metallicity_entry.set(metallicity.get())
temperature_label = Label(root, text='Stellar temperature (K)')
temperature_entry = Scale(root, from_=3500, to=7000, resolution=250, variable=temperature, orient=HORIZONTAL)
logg_label = Label(root, text='Stellar surface gravity (cm/s^2, log)')
logg_entry = Scale(root, from_=0.0, to=5.0, resolution=0.5, variable=logg, orient=HORIZONTAL)
available_filters = ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K']
phot_filter_label = Label(root, text='Filter')
phot_filter_label_2 = Label(root, text=available_filters[phot_filter.get()])
phot_filter_entry = Scale(root, from_=0, to=len(available_filters) - 1, resolution=1,
variable=phot_filter, showvalue=False, orient=HORIZONTAL)
period_label = Label(root, text='Period (days)')
period_entry = Entry(root, textvariable=period, validate='key')
period_entry['validatecommand'] = (period_entry.register(test_float_positive_input), '%P', '%d')
rp_over_rs_label = Label(root, text='Rp/Rs')
rp_over_rs_entry = Scale(root, from_=0, to=0.15, resolution=0.005, variable=rp_over_rs, orient=HORIZONTAL)
sma_over_rs_label = Label(root, text='a/Rs')
sma_over_rs_entry = Scale(root, from_=1, to=20, resolution=0.1, variable=sma_over_rs, orient=HORIZONTAL)
inclination_label = Label(root, text='Inclination (deg)')
inclination_entry = Scale(root, from_=70, to=90, resolution=0.1, variable=inclination, orient=HORIZONTAL)
eccentricity_label = Label(root, text='Eccentricity')
eccentricity_entry = Scale(root, from_=0, to=1, resolution=0.01, variable=eccentricity, orient=HORIZONTAL)
periastron_label = Label(root, text='Periastron (deg)')
periastron_entry = Scale(root, from_=0, to=360, resolution=1, variable=periastron, orient=HORIZONTAL)
ascending_node_label = Label(root, text='Ascending node (deg)')
ascending_node_entry = Scale(root, from_=0, to=360, resolution=1, variable=ascending_node, orient=HORIZONTAL)
planet_label = Label(root, text=' Planet ')
planet_search_entry = Entry(root, textvariable=planet_search)
combostyle = ttk.Style()
combostyle.theme_create('combostyle', parent='alt',
settings={'TCombobox': {'configure':
{'selectbackground': 'white',
'fieldbackground': 'white',
'background': 'white'}}})
combostyle.theme_use('combostyle')
planet_entry = ttk.Combobox(root, textvariable=planet, state='readonly')
search_planet_button = Button(root, text='SEARCH')
plot_button = Button(root, text='PLOT')
exit_transit_simulator_button = Button(root, text='EXIT')
# define the function that updates the window
def update_window(*event):
if not event:
pass
if update_planet_list.get():
if isinstance(catalogue.searchPlanet(planet_search.get()), list):
test_sample = []
for test_planet in catalogue.searchPlanet(planet_search.get()):
if test_planet.isTransiting:
test_sample.append(test_planet)
planet_entry['values'] = tuple([ppp.name for ppp in test_sample])
elif catalogue.searchPlanet(planet_search.get()):
planet_entry['values'] = tuple([catalogue.searchPlanet(planet_search.get()).name])
else:
planet_entry['values'] = tuple([])
if len(planet_entry['values']) == 1:
planet.set(planet_entry['values'][0])
update_planet.set(True)
else:
planet.set('Choose Planet')
update_planet_list.set(False)
if update_planet.get():
parameters = plc.find_oec_parameters(planet.get(), catalogue=catalogue)
logg.set(parameters[1])
temperature.set(parameters[2])
metallicity.set(parameters[3])
rp_over_rs.set(parameters[4])
period.set(parameters[6])
sma_over_rs.set(parameters[7])
eccentricity.set(parameters[8])
inclination.set(parameters[9])
periastron.set(parameters[10])
ascending_node.set(0.0)
update_planet.set(False)
phot_filter_label_2.configure(text=available_filters[phot_filter.get()])
planet_entry.selection_clear()
plot_transit = True
for input_entry in [phot_filter_entry, metallicity_entry, temperature_entry, logg_entry, period_entry,
rp_over_rs_entry, sma_over_rs_entry, inclination_entry, eccentricity_entry,
periastron_entry]:
if len(str(input_entry.get())) == 0:
plot_transit = False
if plot_transit:
if period.get() == 0 or rp_over_rs.get() == 0 or sma_over_rs.get() == 0:
plot_transit = False
if plot_transit:
try:
limb_darkening_coefficients = plc.clablimb('claret', logg.get(), temperature.get(), metallicity.get(),
available_filters[phot_filter.get()])
time_array = np.arange(100 - period.get() / 4, 100 + period.get() / 4, 30. / 24. / 60. / 60.)
position = plc.exoplanet_orbit(period.get(), sma_over_rs.get(), eccentricity.get(), inclination.get(),
periastron.get(), 100, time_array)
time_array = time_array[np.where((np.abs(position[1]) < 1.5) & (position[0] > 0))]
position = plc.exoplanet_orbit(period.get(), sma_over_rs.get(), eccentricity.get(), inclination.get(),
periastron.get(), 100, time_array, ww=ascending_node.get())
transit_light_curve = plc.transit('claret', limb_darkening_coefficients, rp_over_rs.get(), period.get(),
sma_over_rs.get(), eccentricity.get(), inclination.get(),
periastron.get(), 100, time_array)
a1, a2, a3, a4 = limb_darkening_coefficients
star_r = np.arange(0, 1, 0.01)
star_m = np.sqrt(1 - star_r * star_r)
star_i = (1.0 - a1 * (1.0 - star_m ** 0.5) - a2 * (1.0 - star_m)
- a3 * (1.0 - star_m ** 1.5) - a4 * (1.0 - star_m ** 2))
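                # The intensity profile above is the Claret four-coefficient
                # non-linear limb-darkening law,
                # I(mu)/I(1) = 1 - sum_{n=1..4} a_n * (1 - mu**(n/2)),
                # evaluated with mu = sqrt(1 - r**2); the same coefficients
                # feed the plc.transit() call above.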
cmap = matplotlib.cm.get_cmap('rainbow')
color = cmap(1 - (temperature.get() - 3500) / (9000 - 3500))
ax2.cla()
ax2.set_aspect('equal')
ax2.tick_params(axis='both', which='both',
bottom='off', left='off', top='off', right='off', labelbottom='off', labelleft='off')
star_circle = matplotlib.patches.Circle((0, 0), 1, color=color, fc=color)
planet_circle = matplotlib.patches.Circle((position[1][len(position[1]) // 2],
position[2][len(position[2]) // 2]),
rp_over_rs.get(), color='k', fc='k')
ax2.add_patch(star_circle)
ax2.add_patch(planet_circle)
ax2.plot(position[1], position[2], c='k')
ax2.set_xlim(-1.5, 1.5)
ax2.set_ylim(-1.5, 1.5)
ax3.cla()
ax3.set_aspect(3.0)
ax3.set_yticks([0, 0.5, 1.0])
ax3.set_xticks([-1.0, 0, 1.0])
ax3.plot(star_r, star_i, c=color)
ax3.plot(-star_r, star_i, c=color)
ax3.set_xlim(-1.5, 1.5)
ax3.set_ylim(0.1, 1.1)
ax3.set_ylabel('I / I0')
ax3.set_xlabel('r / Rs')
ax1.cla()
ax1.plot((time_array - 100) * 24.0 * 60.0, 1000 * transit_light_curve, c='k')
ylim_1 = int(min(transit_light_curve) * 200)
ax1.set_ylim(1000 * ylim_1 / 200.0, 1001)
ax1.tick_params(left='off', right='on', labelleft='off', labelright='on')
ax1.set_ylabel('F / F0 (ppt)')
ax1.set_xlabel('t - T0 (min)')
except IndexError:
ax1.cla()
ax2.cla()
ax3.cla()
canvas.draw()
update_window()
# define actions for the different buttons, including calls to the function that updates the window
def choose_planet(entry):
if not entry:
return 0
update_planet.set(True)
update_window()
def search_planet():
update_planet_list.set(True)
update_window()
def plot():
open_root2.set(True)
update_window()
root2.deiconify()
def exit_transit_simulator():
root.destroy()
root2.destroy()
# connect actions to widgets
planet_entry.bind('<<ComboboxSelected>>', choose_planet)
phot_filter_entry.bind("<ButtonRelease-1>", update_window)
metallicity_entry.bind("<ButtonRelease-1>", update_window)
temperature_entry.bind("<ButtonRelease-1>", update_window)
logg_entry.bind("<ButtonRelease-1>", update_window)
period_entry.bind(sequence='<KeyRelease>', func=update_window)
rp_over_rs_entry.bind("<ButtonRelease-1>", update_window)
sma_over_rs_entry.bind("<ButtonRelease-1>", update_window)
inclination_entry.bind("<ButtonRelease-1>", update_window)
eccentricity_entry.bind("<ButtonRelease-1>", update_window)
periastron_entry.bind("<ButtonRelease-1>", update_window)
ascending_node_entry.bind("<ButtonRelease-1>", update_window)
search_planet_button['command'] = search_planet
plot_button['command'] = plot
exit_transit_simulator_button['command'] = exit_transit_simulator
# setup window
setup_window(root, [
[],
[[phot_filter_label_2, 3]],
[[phot_filter_label, 2], [phot_filter_entry, 3]],
[[metallicity_label, 2], [metallicity_entry, 3]],
[[temperature_label, 2], [temperature_entry, 3]],
[[logg_label, 2], [logg_entry, 3]],
[[planet_label, 1]],
[[planet_search_entry, 1], [period_label, 2], [period_entry, 3]],
[[search_planet_button, 1]],
[[planet_entry, 1], [rp_over_rs_label, 2], [rp_over_rs_entry, 3]],
[[sma_over_rs_label, 2], [sma_over_rs_entry, 3]],
[[inclination_label, 2], [inclination_entry, 3]],
[[plot_button, 1], [eccentricity_label, 2], [eccentricity_entry, 3]],
[[exit_transit_simulator_button, 1], [periastron_label, 2], [periastron_entry, 3]],
[[ascending_node_label, 2], [ascending_node_entry, 3]],
[],
])
# finalise and show window
finalise_window(root, 1)
finalise_window(root2, 3)
root.mainloop() | mit |
lycopoda/pyquan2 | align.py | 1 | 3273 | import numpy as np
class SaveImage():
def __init__(self, align_dir, sample_y):
self._dir = align_dir
self._sample_y = sample_y
def save_image(self, x, y, sample_x, CF):
import matplotlib.pyplot as plt
import os.path
x_line = np.arange(0.,max(x))
y_line = x_line*CF[0]+CF[1]
x = np.array(x)
y = np.array(y)
plt.figure()
plt.scatter(x,y)
plt.plot(x_line, y_line, 'r-')
plt.xlabel(self._sample_y)
plt.ylabel(sample_x)
plot_name = os.path.join(self._dir,'{0}.png'.format(sample_x))
plt.savefig(plot_name)
plt.close()
return
class AlignSample():
def __init__(self, path, sample_y, RT_y):
self._sample_y = sample_y
self._RT_y = RT_y
self._image = SaveImage(path.align_dir, sample_y)
def align_sample(self, sample_x, RT_x):
x=[]
y=[]
for code in self._RT_y:
if code in RT_x:
x.append(RT_x[code])
y.append(self._RT_y[code])
reg = Regression(x,y)
CF = reg.lin_robust()
self._image.save_image(x,y,sample_x,CF)
return CF
class Regression(object):
    '''First finds the candidate line (through a pair of points) that passes
    within a limited distance (0.01 * max(y)) of the largest number of points.
    A linear regression is then fitted through the points on that line.'''
def __init__(self, x, y):
self._x = x
self._y = y
def get_rc_list(self):
self._rc_list=[]
x_set = set()
for i in range(len(self._x)):
x_set.add(i)
for j in range(len(self._x)):
if not j in x_set and not self._x[i] == self._x[j]:
rc = (self._y[i]-self._y[j]) / (self._x[i] - self._x[j])
ic = self._y[i] - self._x[i]*rc
item = (i,j,rc,ic)
self._rc_list.append(item)
return
def get_line_list(self, item, lim):
rc = item[2]
ic = item[3]
idx_list = []
for i in range(len(self._x)):
if abs(rc*self._x[i]+ic - self._y[i]) < lim:
idx_list.append(i)
return idx_list
def lin_robust(self):
lim = 0.01 #max distance from line, in fraction of max(y)
lim = lim * max(self._y)
self.get_rc_list()
line_list = []
for i in self._rc_list:
idx_list = self.get_line_list(i, lim)
line_list.append(idx_list)
max_list = 0
idx_line = None
for j in line_list:
if len(j) > max_list:
max_list = len(j)
idx_line = j
x_list=[]
y_list=[]
for idx in idx_line:
x_list.append(self._x[idx])
y_list.append(self._y[idx])
[rc, ic] = np.polyfit(x_list, y_list, 1)
return rc, ic
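# A minimal usage sketch of the robust line fit above. The helper name and the
# data are hypothetical and purely illustrative: a straight line plus one
# outlier, which is excluded because it lies farther than 0.01 * max(y) from
# the best candidate line.
def _example_lin_robust():
    x = [0.0, 1.0, 2.0, 3.0, 4.0]
    y = [0.1, 1.1, 2.1, 3.1, 10.0]   # last point is an outlier
    rc, ic = Regression(x, y).lin_robust()
    return rc, ic                    # ~1.0 slope, ~0.1 intercept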
def calc_median(x):
    """Return the median of a sequence of numbers."""
    sorts = sorted(x)
    length = len(sorts)
    if length == 1:
        return x[0]
    elif not length % 2:
        # even count: average the two middle values
        return (sorts[length // 2] + sorts[length // 2 - 1]) / 2.0
    else:
        # floor division keeps the index an integer under Python 3 as well
        return sorts[length // 2]
| gpl-2.0 |
nelson-liu/scikit-learn | sklearn/mixture/dpgmm.py | 25 | 35852 | """Bayesian Gaussian Mixture Models and
Dirichlet Process Gaussian Mixture Models"""
from __future__ import print_function
# Author: Alexandre Passos ([email protected])
# Bertrand Thirion <[email protected]>
#
# Based on mixture.py by:
# Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# Important note for the deprecation cleaning of 0.20 :
# All the function and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
# - 'sklearn/mixture/test_gmm.py'
import numpy as np
from scipy.special import digamma as _digamma, gammaln as _gammaln
from scipy import linalg
from scipy.spatial.distance import cdist
from ..externals.six.moves import xrange
from ..utils import check_random_state, check_array, deprecated
from ..utils.extmath import logsumexp, pinvh, squared_norm, stable_cumsum
from ..utils.validation import check_is_fitted
from .. import cluster
from .gmm import _GMMBase
@deprecated("The function digamma is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.digamma instead.")
def digamma(x):
return _digamma(x + np.finfo(np.float32).eps)
@deprecated("The function gammaln is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.gammaln instead.")
def gammaln(x):
return _gammaln(x + np.finfo(np.float32).eps)
@deprecated("The function log_normalize is deprecated in 0.18 and "
"will be removed in 0.20.")
def log_normalize(v, axis=0):
"""Normalized probabilities from unnormalized log-probabilites"""
v = np.rollaxis(v, axis)
v = v.copy()
v -= v.max(axis=0)
out = logsumexp(v)
v = np.exp(v - out)
v += np.finfo(np.float32).eps
v /= np.sum(v, axis=0)
return np.swapaxes(v, 0, axis)
@deprecated("The function wishart_log_det is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_log_det(a, b, detB, n_features):
"""Expected value of the log of the determinant of a Wishart
The expected value of the logarithm of the determinant of a
wishart-distributed random variable with the specified parameters."""
l = np.sum(digamma(0.5 * (a - np.arange(-1, n_features - 1))))
l += n_features * np.log(2)
return l + detB
@deprecated("The function wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_logz(v, s, dets, n_features):
"The logarithm of the normalization constant for the wishart distribution"
z = 0.
z += 0.5 * v * n_features * np.log(2)
z += (0.25 * (n_features * (n_features - 1)) * np.log(np.pi))
z += 0.5 * v * np.log(dets)
z += np.sum(gammaln(0.5 * (v - np.arange(n_features) + 1)))
return z
def _bound_wishart(a, B, detB):
"""Returns a function of the dof, scale matrix and its determinant
used as an upper bound in variational approximation of the evidence"""
n_features = B.shape[0]
logprior = wishart_logz(a, B, detB, n_features)
logprior -= wishart_logz(n_features,
np.identity(n_features),
1, n_features)
logprior += 0.5 * (a - 1) * wishart_log_det(a, B, detB, n_features)
logprior += 0.5 * a * np.trace(B)
return logprior
##############################################################################
# Variational bound on the log likelihood of each class
##############################################################################
def _sym_quad_form(x, mu, A):
"""helper function to calculate symmetric quadratic form x.T * A * x"""
q = (cdist(x, mu[np.newaxis], "mahalanobis", VI=A) ** 2).reshape(-1)
return q
def _bound_state_log_lik(X, initial_bound, precs, means, covariance_type):
"""Update the bound with likelihood terms, for standard covariance types"""
n_components, n_features = means.shape
n_samples = X.shape[0]
bound = np.empty((n_samples, n_components))
bound[:] = initial_bound
if covariance_type in ['diag', 'spherical']:
for k in range(n_components):
d = X - means[k]
bound[:, k] -= 0.5 * np.sum(d * d * precs[k], axis=1)
elif covariance_type == 'tied':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs)
elif covariance_type == 'full':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs[k])
return bound
class _DPGMMBase(_GMMBase):
"""Variational Inference for the Infinite Gaussian Mixture Model.
DPGMM stands for Dirichlet Process Gaussian Mixture Model, and it
is an infinite mixture model with the Dirichlet Process as a prior
distribution on the number of clusters. In practice the
approximate inference algorithm uses a truncated distribution with
a fixed maximum number of components, but almost always the number
of components actually used depends on the data.
Stick-breaking Representation of a Gaussian mixture model
probability distribution. This class allows for easy and efficient
inference of an approximate posterior distribution over the
parameters of a Gaussian mixture model with a variable number of
components (smaller than the truncation parameter n_components).
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <dpgmm>`.
Parameters
----------
n_components : int, default 1
Number of mixture components.
covariance_type : string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha : float, default 1
Real number representing the concentration parameter of
the dirichlet process. Intuitively, the Dirichlet Process
is as likely to start a new cluster for a point as it is
to add that point to a cluster with alpha elements. A
higher alpha means more clusters, as the expected number
of clusters is ``alpha*log(N)``.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_components : int
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
self.alpha = alpha
super(_DPGMMBase, self).__init__(n_components, covariance_type,
random_state=random_state,
tol=tol, min_covar=min_covar,
n_iter=n_iter, params=params,
init_params=init_params,
verbose=verbose)
def _get_precisions(self):
"""Return precisions as a full matrix."""
if self.covariance_type == 'full':
return self.precs_
elif self.covariance_type in ['diag', 'spherical']:
return [np.diag(cov) for cov in self.precs_]
elif self.covariance_type == 'tied':
return [self.precs_] * self.n_components
def _get_covars(self):
return [pinvh(c) for c in self._get_precisions()]
def _set_covars(self, covars):
raise NotImplementedError("""The variational algorithm does
not support setting the covariance parameters.""")
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
z = np.zeros((X.shape[0], self.n_components))
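        # Stick-breaking expectations under the variational posterior:
        # dgamma1 holds E[log V_k] and dgamma2 accumulates E[log(1 - V_j)] for
        # j < k, so their sum gives E[log pi_k] for each mixture component.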
sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dgamma1 = digamma(self.gamma_.T[1]) - sd
dgamma2 = np.zeros(self.n_components)
dgamma2[0] = digamma(self.gamma_[0, 2]) - digamma(self.gamma_[0, 1] +
self.gamma_[0, 2])
for j in range(1, self.n_components):
dgamma2[j] = dgamma2[j - 1] + digamma(self.gamma_[j - 1, 2])
dgamma2[j] -= sd[j - 1]
dgamma = dgamma1 + dgamma2
# Free memory and developers cognitive load:
del dgamma1, dgamma2, sd
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dgamma
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
"""Update the concentration parameters for each cluster"""
sz = np.sum(z, axis=0)
self.gamma_.T[1] = 1. + sz
self.gamma_.T[2].fill(0)
for i in range(self.n_components - 2, -1, -1):
self.gamma_[i, 2] = self.gamma_[i + 1, 2] + sz[i]
self.gamma_.T[2] += self.alpha
def _update_means(self, X, z):
"""Update the variational distributions for the means"""
n_features = X.shape[1]
for k in range(self.n_components):
if self.covariance_type in ['spherical', 'diag']:
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num *= self.precs_[k]
den = 1. + self.precs_[k] * np.sum(z.T[k])
self.means_[k] = num / den
elif self.covariance_type in ['tied', 'full']:
if self.covariance_type == 'tied':
cov = self.precs_
else:
cov = self.precs_[k]
den = np.identity(n_features) + cov * np.sum(z.T[k])
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num = np.dot(cov, num)
self.means_[k] = linalg.lstsq(den, num)[0]
def _update_precisions(self, X, z):
"""Update the variational distributions for the precisions"""
n_features = X.shape[1]
if self.covariance_type == 'spherical':
self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
for k in range(self.n_components):
# could be more memory efficient ?
sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
self.scale_[k] = 1.
self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
self.bound_prec_[k] = (
0.5 * n_features * (
digamma(self.dof_[k]) - np.log(self.scale_[k])))
self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
elif self.covariance_type == 'diag':
for k in range(self.n_components):
self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
sq_diff = (X - self.means_[k]) ** 2 # see comment above
self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
z.T[k], (sq_diff + 1))
self.precs_[k] = self.dof_[k] / self.scale_[k]
self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
- np.log(self.scale_[k]))
self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
elif self.covariance_type == 'tied':
self.dof_ = 2 + X.shape[0] + n_features
self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
for k in range(self.n_components):
diff = X - self.means_[k]
self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_ = pinvh(self.scale_)
self.precs_ = self.dof_ * self.scale_
self.det_scale_ = linalg.det(self.scale_)
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
sum_resp = np.sum(z.T[k])
self.dof_[k] = 2 + sum_resp + n_features
self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
diff = X - self.means_[k]
self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_[k] = pinvh(self.scale_[k])
self.precs_[k] = self.dof_[k] * self.scale_[k]
self.det_scale_[k] = linalg.det(self.scale_[k])
self.bound_prec_[k] = 0.5 * wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
self.scale_[k])
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_.T[1])
print("covariance_type:", self.covariance_type)
def _do_mstep(self, X, z, params):
"""Maximize the variational lower bound
Update each of the parameters to maximize the lower bound."""
self._monitor(X, z, "z")
self._update_concentration(z)
self._monitor(X, z, "gamma")
if 'm' in params:
self._update_means(X, z)
self._monitor(X, z, "mu")
if 'c' in params:
self._update_precisions(X, z)
self._monitor(X, z, "a and b", end=True)
def _initialize_gamma(self):
"Initializes the concentration parameters"
self.gamma_ = self.alpha * np.ones((self.n_components, 3))
def _bound_concentration(self):
"""The variational lower bound for the concentration parameter."""
logprior = gammaln(self.alpha) * self.n_components
logprior += np.sum((self.alpha - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior += np.sum(- gammaln(self.gamma_.T[1] + self.gamma_.T[2]))
logprior += np.sum(gammaln(self.gamma_.T[1]) +
gammaln(self.gamma_.T[2]))
logprior -= np.sum((self.gamma_.T[1] - 1) * (
digamma(self.gamma_.T[1]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior -= np.sum((self.gamma_.T[2] - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
return logprior
def _bound_means(self):
"The variational lower bound for the mean parameters"
logprior = 0.
logprior -= 0.5 * squared_norm(self.means_)
logprior -= 0.5 * self.means_.shape[1] * self.n_components
return logprior
def _bound_precisions(self):
"""Returns the bound term related to precisions"""
logprior = 0.
if self.covariance_type == 'spherical':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_
- self.precs_[:, 0])
elif self.covariance_type == 'diag':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_ - self.precs_)
elif self.covariance_type == 'tied':
logprior += _bound_wishart(self.dof_, self.scale_, self.det_scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
logprior += _bound_wishart(self.dof_[k],
self.scale_[k],
self.det_scale_[k])
return logprior
def _bound_proportions(self, z):
"""Returns the bound term related to proportions"""
dg12 = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dg1 = digamma(self.gamma_.T[1]) - dg12
dg2 = digamma(self.gamma_.T[2]) - dg12
cz = stable_cumsum(z[:, ::-1], axis=-1)[:, -2::-1]
logprior = np.sum(cz * dg2[:-1]) + np.sum(z * dg1)
del cz # Save memory
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _logprior(self, z):
logprior = self._bound_concentration()
logprior += self._bound_means()
logprior += self._bound_precisions()
logprior += self._bound_proportions(z)
return logprior
def lower_bound(self, X, z):
"""returns a lower bound on model evidence based on X and membership"""
check_is_fitted(self, 'means_')
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
c = np.sum(z * _bound_state_log_lik(X, self._initial_bound +
self.bound_prec_, self.precs_,
self.means_, self.covariance_type))
return c + self._logprior(z)
def _set_weights(self):
for i in xrange(self.n_components):
self.weights_[i] = self.gamma_[i, 1] / (self.gamma_[i, 1]
+ self.gamma_[i, 2])
self.weights_ /= np.sum(self.weights_)
def _fit(self, X, y=None):
"""Estimate model parameters with the variational
algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
        An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
        the object. Likewise, if you would just like to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.random_state_ = check_random_state(self.random_state)
# initialization step
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
z = np.ones((n_samples, self.n_components))
z /= self.n_components
self._initial_bound = - 0.5 * n_features * np.log(2 * np.pi)
self._initial_bound -= np.log(2 * np.pi * np.e)
if (self.init_params != '') or not hasattr(self, 'gamma_'):
self._initialize_gamma()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state_).fit(X).cluster_centers_[::-1]
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components, self.n_components)
if 'c' in self.init_params or not hasattr(self, 'precs_'):
if self.covariance_type == 'spherical':
self.dof_ = np.ones(self.n_components)
self.scale_ = np.ones(self.n_components)
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * n_features * (
digamma(self.dof_) - np.log(self.scale_))
elif self.covariance_type == 'diag':
self.dof_ = 1 + 0.5 * n_features
self.dof_ *= np.ones((self.n_components, n_features))
self.scale_ = np.ones((self.n_components, n_features))
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * (np.sum(digamma(self.dof_) -
np.log(self.scale_), 1))
self.bound_prec_ -= 0.5 * np.sum(self.precs_, 1)
elif self.covariance_type == 'tied':
self.dof_ = 1.
self.scale_ = np.identity(n_features)
self.precs_ = np.identity(n_features)
self.det_scale_ = 1.
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
self.dof_ = (1 + self.n_components + n_samples)
self.dof_ *= np.ones(self.n_components)
self.scale_ = [2 * np.identity(n_features)
for _ in range(self.n_components)]
self.precs_ = [np.identity(n_features)
for _ in range(self.n_components)]
self.det_scale_ = np.ones(self.n_components)
self.bound_prec_ = np.zeros(self.n_components)
for k in range(self.n_components):
self.bound_prec_[k] = wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= (self.dof_[k] *
np.trace(self.scale_[k]))
self.bound_prec_ *= 0.5
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
prev_log_likelihood = current_log_likelihood
# Expectation step
curr_logprob, z = self.score_samples(X)
current_log_likelihood = (
curr_logprob.mean() + self._logprior(z) / n_samples)
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if change < self.tol:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, z, self.params)
if self.n_iter == 0:
# Need to make sure that there is a z value to output
# Output zeros because it was just a quick initialization
z = np.zeros((X.shape[0], self.n_components))
self._set_weights()
return z
@deprecated("The `DPGMM` class is not working correctly and it's better "
"to use `sklearn.mixture.BayesianGaussianMixture` class with "
"parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be "
"removed in 0.20.")
class DPGMM(_DPGMMBase):
"""Dirichlet Process Gaussian Mixture Models
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.BayesianGaussianMixture` with
parameter ``weight_concentration_prior_type='dirichlet_process'``
instead.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
super(DPGMM, self).__init__(
n_components=n_components, covariance_type=covariance_type,
alpha=alpha, random_state=random_state, tol=tol, verbose=verbose,
min_covar=min_covar, n_iter=n_iter, params=params,
init_params=init_params)
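# A minimal, illustrative usage sketch of the deprecated class above; the
# helper name and toy data are hypothetical, not part of the library. New code
# should prefer sklearn.mixture.BayesianGaussianMixture with
# weight_concentration_prior_type='dirichlet_process'.
def _example_dpgmm_usage():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 5])  # two toy clusters
    model = DPGMM(n_components=5, covariance_type='diag', n_iter=50)
    model.fit(X)             # unused components end up with near-zero weight
    return model.predict(X)  # hard cluster assignments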
@deprecated("The `VBGMM` class is not working correctly and it's better "
"to use `sklearn.mixture.BayesianGaussianMixture` class with "
"parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. "
"VBGMM is deprecated in 0.18 and will be removed in 0.20.")
class VBGMM(_DPGMMBase):
"""Variational Inference for the Gaussian Mixture Model
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.BayesianGaussianMixture` with parameter
``weight_concentration_prior_type='dirichlet_distribution'`` instead.
Variational inference for a Gaussian mixture model probability
distribution. This class allows for easy and efficient inference
of an approximate posterior distribution over the parameters of a
Gaussian mixture model with a fixed number of components.
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <vbgmm>`.
Parameters
----------
n_components : int, default 1
Number of mixture components.
covariance_type : string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha : float, default 1
Real number representing the concentration parameter of
the dirichlet distribution. Intuitively, the higher the
value of alpha the more likely the variational mixture of
Gaussians model will use all components it can.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussians.
n_components : int (read-only)
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False
otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
DPGMM : Infinite Gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
super(VBGMM, self).__init__(
n_components, covariance_type, random_state=random_state,
tol=tol, verbose=verbose, min_covar=min_covar,
n_iter=n_iter, params=params, init_params=init_params)
self.alpha = alpha
def _fit(self, X, y=None):
"""Estimate model parameters with the variational algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
        An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
the object. Likewise, if you just would like to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.alpha_ = float(self.alpha) / self.n_components
return super(VBGMM, self)._fit(X, y)
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
dg = digamma(self.gamma_) - digamma(np.sum(self.gamma_))
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dg
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
for i in range(self.n_components):
self.gamma_[i] = self.alpha_ + np.sum(z.T[i])
def _initialize_gamma(self):
self.gamma_ = self.alpha_ * np.ones(self.n_components)
def _bound_proportions(self, z):
logprior = 0.
dg = digamma(self.gamma_)
dg -= digamma(np.sum(self.gamma_))
logprior += np.sum(dg.reshape((-1, 1)) * z.T)
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _bound_concentration(self):
logprior = 0.
logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
* self.alpha_)
logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha_))
sg = digamma(np.sum(self.gamma_))
logprior += np.sum((self.gamma_ - self.alpha_)
* (digamma(self.gamma_) - sg))
return logprior
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_)
print("covariance_type:", self.covariance_type)
def _set_weights(self):
self.weights_[:] = self.gamma_
self.weights_ /= np.sum(self.weights_)
| bsd-3-clause |
wzbozon/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
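# Reference implementation of the classic perceptron rule (on each mistake:
# w += y_i * x_i and b += y_i), used below to cross-check sklearn's Perceptron
# when shuffling is disabled.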
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
Phobia0ptik/ThinkStats2 | code/hinc_soln.py | 67 | 4296 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import hinc
import thinkplot
import thinkstats2
"""This file contains a solution to an exercise in Think Stats:
The distributions of wealth and income are sometimes modeled using
lognormal and Pareto distributions. To see which is better, let's
look at some data.
The Current Population Survey (CPS) is a joint effort of the Bureau
of Labor Statistics and the Census Bureau to study income and related
variables. Data collected in 2013 is available from
http://www.census.gov/hhes/www/cpstables/032013/hhinc/toc.htm.
I downloaded hinc06.xls, which is an Excel spreadsheet with
information about household income, and converted it to hinc06.csv,
a CSV file you will find in the repository for this book. You
will also find hinc.py, which reads the CSV file.
Extract the distribution of incomes from this dataset. Are any of the
analytic distributions in this chapter a good model of the data? A
solution to this exercise is in hinc_soln.py.
My solution generates three figures:
1) The CDF of income on a linear scale.
2) The CCDF on a log-log scale along with a Pareto model intended
to match the tail behavior.
3) The CDF on a log-x scale along with a lognormal model chose to
match the median and inter-quartile range.
My conclusions based on these figures are:
1) The Pareto model is probably a reasonable choice for the top
10-20% of incomes.
2) The lognormal model captures the shape of the distribution better,
but the data deviate substantially from the model. With different
choices for sigma, you could match the upper or lower tail, but not
both at the same time.
In summary I would say that neither model captures the whole distribution,
so you might have to
1) look for another analytic model,
2) choose one that captures the part of the distribution that is most
relevant, or
3) avoid using an analytic model altogether.
"""
class SmoothCdf(thinkstats2.Cdf):
"""Represents a CDF based on calculated quantiles.
"""
def Render(self):
"""Because this CDF was not computed from a sample, it
should not be rendered as a step function.
"""
return self.xs, self.ps
def Prob(self, x):
"""Compute CDF(x), interpolating between known values.
"""
return np.interp(x, self.xs, self.ps)
def Value(self, p):
"""Compute inverse CDF(x), interpolating between probabilities.
"""
return np.interp(p, self.ps, self.xs)
def MakeFigures(df):
"""Plots the CDF of income in several forms.
"""
xs, ps = df.income.values, df.ps.values
cdf = SmoothCdf(xs, ps, label='data')
cdf_log = SmoothCdf(np.log10(xs), ps, label='data')
# linear plot
thinkplot.Cdf(cdf)
thinkplot.Save(root='hinc_linear',
xlabel='household income',
ylabel='CDF')
# pareto plot
# for the model I chose parameters by hand to fit the tail
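    # On log-log axes the Pareto CCDF is a straight line with slope -alpha,
    # which is why the tail is judged on the complementary CDF here.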
xs, ys = thinkstats2.RenderParetoCdf(xmin=55000, alpha=2.5,
low=0, high=250000)
thinkplot.Plot(xs, 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf, complement=True)
thinkplot.Save(root='hinc_pareto',
xlabel='log10 household income',
ylabel='CCDF',
xscale='log',
yscale='log')
# lognormal plot
# for the model I estimate mu and sigma using
# percentile-based statistics
median = cdf_log.Percentile(50)
iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
std = iqr / 1.349
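    # 1.349 is the IQR of a standard normal, Phi^-1(0.75) - Phi^-1(0.25),
    # so iqr / 1.349 is a robust, percentile-based estimate of sigma.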
# choose std to match the upper tail
std = 0.35
print(median, std)
xs, ps = thinkstats2.RenderNormalCdf(median, std, low=3.5, high=5.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Save(root='hinc_normal',
xlabel='log10 household income',
ylabel='CDF')
def main():
df = hinc.ReadData()
MakeFigures(df)
if __name__ == "__main__":
main()
| gpl-3.0 |
anilcs13m/content | labs/lab2/cs109style.py | 38 | 1293 | from __future__ import print_function
from IPython.core.display import HTML
from matplotlib import rcParams
#colorbrewer2 Dark2 qualitative color table
dark2_colors = [(0.10588235294117647, 0.6196078431372549, 0.4666666666666667),
(0.8509803921568627, 0.37254901960784315, 0.00784313725490196),
(0.4588235294117647, 0.4392156862745098, 0.7019607843137254),
(0.9058823529411765, 0.1607843137254902, 0.5411764705882353),
(0.4, 0.6509803921568628, 0.11764705882352941),
(0.9019607843137255, 0.6705882352941176, 0.00784313725490196),
(0.6509803921568628, 0.4627450980392157, 0.11372549019607843),
(0.4, 0.4, 0.4)]
def customize_mpl():
"""Tweak matplotlib visual style"""
print("Setting custom matplotlib visual style")
rcParams['figure.figsize'] = (10, 6)
rcParams['figure.dpi'] = 150
rcParams['axes.color_cycle'] = dark2_colors
rcParams['lines.linewidth'] = 2
rcParams['axes.grid'] = True
rcParams['axes.facecolor'] = '#eeeeee'
rcParams['font.size'] = 14
rcParams['patch.edgecolor'] = 'none'
def customize_css():
print("Setting custom CSS for the IPython Notebook")
styles = open('custom.css', 'r').read()
return HTML(styles)
| mit |
kmike/scikit-learn | sklearn/tests/test_cross_validation.py | 2 | 21382 | """Test the cross_validation module"""
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.fixes import unique
from sklearn import cross_validation as cval
from sklearn.base import BaseEstimator
from sklearn.datasets import make_regression
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import explained_variance_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import Scorer
from sklearn.externals import six
from sklearn.linear_model import Ridge
from sklearn.svm import SVC
class MockListClassifier(BaseEstimator):
"""Dummy classifier to test the cross-validation.
Checks that GridSearchCV didn't convert X to array.
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
assert_true(isinstance(X, list))
return self
def predict(self, T):
return T.shape[0]
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
class MockClassifier(BaseEstimator):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0):
self.a = a
def fit(self, X, Y=None, sample_weight=None, class_prior=None):
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
return self
def predict(self, T):
return T.shape[0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.arange(10) / 2
##############################################################################
# Tests
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
y = [0, 0, 1, 1, 2]
cval.StratifiedKFold(y, 3)
# checking there was only one warning.
assert_equal(len(w), 1)
# checking it has the right type
assert_equal(w[0].category, Warning)
# checking it's the right warning. This might be a bad test since it's
# a characteristic of the code and not a behavior
assert_true("The least populated class" in str(w[0]))
# Error when number of folds is <= 0
assert_raises(ValueError, cval.KFold, 2, 0)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 1)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
all_folds = None
for train, test in kf:
if all_folds is None:
all_folds = test.copy()
else:
all_folds = np.concatenate((all_folds, test))
all_folds.sort()
assert_array_equal(all_folds, np.arange(300))
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf1 = cval.KFold(300, 3, shuffle=True, random_state=0, indices=True)
kf2 = cval.KFold(300, 3, shuffle=True, random_state=0, indices=False)
ind = np.arange(300)
for kf in (kf1, kf2):
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0, indices=True)
for train, test in sss:
assert_array_equal(unique(y[train]), unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(unique(y[train], return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(unique(y[test], return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_iter_no_indices():
y = np.asarray([0, 1, 2] * 10)
sss1 = cval.StratifiedShuffleSplit(y, indices=False, random_state=0)
train_mask, test_mask = next(iter(sss1))
sss2 = cval.StratifiedShuffleSplit(y, indices=True, random_state=0)
train_indices, test_indices = next(iter(sss2))
assert_array_equal(sorted(test_indices), np.where(test_mask)[0])
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with X as list
clf = MockListClassifier()
scores = cval.cross_val_score(clf, X.tolist(), y)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
score = cval.cross_val_score(clf, X, y, score_func=score_func)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = range(10)
split = cval.train_test_split(X, X_s, y)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [1., 0.97, 0.90, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [1., 0.97, 0.90, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1", cv=5)
assert_array_almost_equal(f1_scores, [1., 0.97, 0.90, 0.97, 1.], 2)
# also test deprecated old way
with warnings.catch_warnings(record=True):
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
score_func=f1_score, cv=5)
assert_array_almost_equal(f1_scores, [1., 0.97, 0.90, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error
mse_scores = cval.cross_val_score(reg, X, y, cv=5, scoring="mse")
expected_mse = np.array([763.07, 553.16, 274.38, 273.26, 1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
with warnings.catch_warnings(record=True):
ev_scores = cval.cross_val_score(reg, X, y, cv=5,
score_func=explained_variance_score)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
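# permutation_test_score refits the estimator on label-shuffled copies of the
# data; the returned p-value is (C + 1) / (n_permutations + 1), where C counts
# permutations that score at least as well as the original labels.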
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, "accuracy", cv)
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, "accuracy", cv, labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
scorer = Scorer(fbeta_score, beta=2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, scoring=scorer, cv=cv, labels=np.ones(y.size),
random_state=0)
assert_almost_equal(score_label, .95, 2)
assert_almost_equal(pvalue_label, 0.01, 3)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2, indices=True)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, "accuracy", cv_sparse,
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(svm, X, y,
"accuracy", cv)
assert_less(score, 0.5)
assert_greater(pvalue, 0.4)
# test with deprecated interface
with warnings.catch_warnings(record=True):
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, score_func=accuracy_score, cv=cv)
assert_less(score, 0.5)
assert_greater(pvalue, 0.4)
def test_cross_val_generator_with_mask():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4, indices=False)
lpo = cval.LeavePOut(4, 2, indices=False)
kf = cval.KFold(4, 2, indices=False)
skf = cval.StratifiedKFold(y, 2, indices=False)
lolo = cval.LeaveOneLabelOut(labels, indices=False)
lopo = cval.LeavePLabelOut(labels, 2, indices=False)
ss = cval.ShuffleSplit(4, indices=False)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss]:
for train, test in cv:
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4, indices=True)
lpo = cval.LeavePOut(4, 2, indices=True)
kf = cval.KFold(4, 2, indices=True)
skf = cval.StratifiedKFold(y, 2, indices=True)
lolo = cval.LeaveOneLabelOut(labels, indices=True)
lopo = cval.LeavePLabelOut(labels, 2, indices=True)
b = cval.Bootstrap(2) # only in index mode
ss = cval.ShuffleSplit(2, indices=True)
for cv in [loo, lpo, kf, skf, lolo, lopo, b, ss]:
for train, test in cv:
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
def test_cross_val_generator_mask_indices_same():
# Test that the cross validation generators return the same results when
# indices=True and when indices=False
y = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2])
labels = np.array([1, 1, 2, 3, 3, 3, 4])
loo_mask = cval.LeaveOneOut(5, indices=False)
loo_ind = cval.LeaveOneOut(5, indices=True)
lpo_mask = cval.LeavePOut(10, 2, indices=False)
lpo_ind = cval.LeavePOut(10, 2, indices=True)
kf_mask = cval.KFold(10, 5, indices=False, shuffle=True, random_state=1)
kf_ind = cval.KFold(10, 5, indices=True, shuffle=True, random_state=1)
skf_mask = cval.StratifiedKFold(y, 3, indices=False)
skf_ind = cval.StratifiedKFold(y, 3, indices=True)
lolo_mask = cval.LeaveOneLabelOut(labels, indices=False)
lolo_ind = cval.LeaveOneLabelOut(labels, indices=True)
lopo_mask = cval.LeavePLabelOut(labels, 2, indices=False)
lopo_ind = cval.LeavePLabelOut(labels, 2, indices=True)
for cv_mask, cv_ind in [(loo_mask, loo_ind), (lpo_mask, lpo_ind),
(kf_mask, kf_ind), (skf_mask, skf_ind),
(lolo_mask, lolo_ind), (lopo_mask, lopo_ind)]:
for (train_mask, test_mask), (train_ind, test_ind) in \
zip(cv_mask, cv_ind):
assert_array_equal(np.where(train_mask)[0], train_ind)
assert_array_equal(np.where(test_mask)[0], test_ind)
def test_bootstrap_errors():
assert_raises(ValueError, cval.Bootstrap, 10, train_size=100)
assert_raises(ValueError, cval.Bootstrap, 10, test_size=100)
assert_raises(ValueError, cval.Bootstrap, 10, train_size=1.1)
assert_raises(ValueError, cval.Bootstrap, 10, test_size=1.1)
def test_bootstrap_test_sizes():
assert_equal(cval.Bootstrap(10, test_size=0.2).test_size, 2)
assert_equal(cval.Bootstrap(10, test_size=2).test_size, 2)
assert_equal(cval.Bootstrap(10, test_size=None).test_size, 5)
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_cross_indices_exception():
X = coo_matrix(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4, indices=False)
lpo = cval.LeavePOut(4, 2, indices=False)
kf = cval.KFold(4, 2, indices=False)
skf = cval.StratifiedKFold(y, 2, indices=False)
lolo = cval.LeaveOneLabelOut(labels, indices=False)
lopo = cval.LeavePLabelOut(labels, 2, indices=False)
assert_raises(ValueError, cval.check_cv, loo, X, y)
assert_raises(ValueError, cval.check_cv, lpo, X, y)
assert_raises(ValueError, cval.check_cv, kf, X, y)
assert_raises(ValueError, cval.check_cv, skf, X, y)
assert_raises(ValueError, cval.check_cv, lolo, X, y)
assert_raises(ValueError, cval.check_cv, lopo, X, y)
| bsd-3-clause |
DiCarloLab-Delft/PycQED_py3 | pycqed/instrument_drivers/meta_instrument/qubit_objects/mock_CCL_Transmon.py | 1 | 53359 | from .CCL_Transmon import CCLight_Transmon
import os
import time
import numpy as np
import logging
from pycqed.measurement import sweep_functions as swf
from pycqed.measurement import detector_functions as det
from pycqed.analysis import measurement_analysis as ma
from pycqed.analysis_v2 import measurement_analysis as ma2
from pycqed.analysis_v2 import spectroscopy_analysis as sa2
import pycqed.analysis.analysis_toolbox as a_tools
from qcodes.instrument.parameter import ManualParameter
from qcodes.utils import validators as vals
from autodepgraph import AutoDepGraph_DAG
import matplotlib.pyplot as plt
from pycqed.analysis import fitting_models as fm
class Mock_CCLight_Transmon(CCLight_Transmon):
def __init__(self, name, **kw):
super().__init__(name, **kw)
self.add_mock_params()
def add_mock_params(self):
"""
Add qubit parameters that are used to mock the system.
These parameters are
- prefixed with `mock_`
- describe "hidden" parameters to mock real experiments.
"""
# Qubit resonator
self.add_parameter('mock_freq_res_bare', label='bare resonator freq',
unit='Hz', parameter_class=ManualParameter,
initial_value=7.487628e9)
self.add_parameter('mock_Qe', parameter_class=ManualParameter,
initial_value=13945)
self.add_parameter('mock_Q', parameter_class=ManualParameter,
initial_value=8459)
self.add_parameter('mock_theta', parameter_class=ManualParameter,
initial_value=-0.1)
self.add_parameter('mock_slope', parameter_class=ManualParameter,
initial_value=0)
self.add_parameter('mock_phi_I', parameter_class=ManualParameter,
initial_value=0)
self.add_parameter('mock_phi_0', parameter_class=ManualParameter,
initial_value=0)
self.add_parameter('mock_pow_shift',
label='power needed for low to high regime',
unit='dBm', initial_value=20,
parameter_class=ManualParameter)
# Test resonator
self.add_parameter('mock_freq_test_res',
label='test resonator frequency',
unit='Hz', parameter_class=ManualParameter,
initial_value=7.76459e9)
self.add_parameter('mock_test_Qe', parameter_class=ManualParameter,
initial_value=1.8e6)
self.add_parameter('mock_test_Q', parameter_class=ManualParameter,
initial_value=1e6)
self.add_parameter('mock_test_theta', parameter_class=ManualParameter,
initial_value=-0.1)
self.add_parameter('mock_test_slope', parameter_class=ManualParameter,
initial_value=0)
self.add_parameter('mock_test_phi_I', parameter_class=ManualParameter,
initial_value=0)
self.add_parameter('mock_test_phi_0', parameter_class=ManualParameter,
initial_value=0)
# Qubit
self.add_parameter('mock_Ec', label='charging energy', unit='Hz',
parameter_class=ManualParameter,
initial_value=266.3e6)
self.add_parameter('mock_Ej1', label='josephson energy', unit='Hz',
parameter_class=ManualParameter,
initial_value=8.647e9)
self.add_parameter('mock_Ej2', label='josephson energy', unit='Hz',
parameter_class=ManualParameter,
initial_value=8.943e9)
self.add_parameter('mock_freq_qubit_bare', label='qubit frequency',
unit='Hz',
initial_value=(np.sqrt(8*self.mock_Ec() *
(self.mock_Ej1()+self.mock_Ej2())) -
self.mock_Ec()),
parameter_class=ManualParameter)
self.add_parameter('mock_anharmonicity', label='anharmonicity',
unit='Hz', initial_value=self.mock_Ec(),
parameter_class=ManualParameter)
# Qubit flux
self.add_parameter('mock_fl_dc_I_per_phi0', unit='A/Wb',
initial_value={'FBL_Q1': 20e-3,
'FBL_Q2': 2},
parameter_class=ManualParameter)
self.add_parameter('mock_sweetspot_phi_over_phi0',
label='magnitude of sweetspot flux',
unit='-', parameter_class=ManualParameter,
initial_value=0.02)
self.add_parameter('mock_fl_dc_ch',
label='most closely coupled fluxline',
unit='', initial_value='FBL_Q1',
parameter_class=ManualParameter)
# Qubit-resonator interaction
self.add_parameter('mock_coupling01',
label='coupling qubit to resonator', unit='Hz',
initial_value=60e6, parameter_class=ManualParameter)
self.add_parameter('mock_coupling12',
label='coupling 12 transition to resonator',
unit='Hz', initial_value=60e6,
parameter_class=ManualParameter)
self.add_parameter('mock_chi01', label='coupling 01 transition',
unit='Hz',
initial_value=(self.mock_coupling01())**2 /
(self.mock_freq_qubit_bare() -
self.mock_freq_res_bare()),
parameter_class=ManualParameter)
self.add_parameter('mock_chi12', label='coupling 12 transition',
unit='Hz',
initial_value=(self.mock_coupling12())**2 /
(self.mock_freq_qubit_bare() +
self.mock_anharmonicity() -
self.mock_freq_res_bare()),
parameter_class=ManualParameter)
self.add_parameter('mock_chi', label='dispersive shift', unit='Hz',
initial_value=self.mock_chi01()-self.mock_chi12()/2,
parameter_class=ManualParameter)
# Readout parameters
self.add_parameter('mock_ro_pulse_amp_CW',
label='Readout pulse amplitude',
unit='Hz', parameter_class=ManualParameter,
initial_value=0.048739)
self.add_parameter('mock_spec_pow', label='optimal spec power',
unit='dBm', initial_value=-35,
parameter_class=ManualParameter)
self.add_parameter('mock_12_spec_amp',
label='amplitude for 12 transition', unit='-',
initial_value=0.5, parameter_class=ManualParameter)
self.add_parameter('mock_mw_amp180', label='Pi-pulse amplitude',
unit='V', initial_value=0.41235468,
parameter_class=ManualParameter)
self.add_parameter('noise', label='nominal noise level', unit='V',
initial_value=0.16e-3,
parameter_class=ManualParameter)
# Qubit characteristics
self.add_parameter('mock_T1', label='relaxation time', unit='s',
initial_value=29e-6,
parameter_class=ManualParameter)
self.add_parameter('mock_T2_star', label='Ramsey T2', unit='s',
initial_value=23.478921e-6,
parameter_class=ManualParameter)
self.add_parameter('mock_T2_echo', label='Echo T2', unit='s',
initial_value=46.2892e-6,
parameter_class=ManualParameter)
def find_spec_pow(self, freqs=None, powers=None, update=True):
'''
Should find the optimal spectroscopy power where the A/w ratio is
at a maximum
'''
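# Illustrative usage (hedged; this mirrors the defaults below rather than
# prescribing values): calling find_spec_pow() with no arguments sweeps
# spec_pow over np.arange(-40, -9, 2) dBm, fits a Lorentzian at each power
# and keeps the power that maximizes the fitted amplitude-to-linewidth
# ratio A/w.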
if freqs is None:
freq_center = self.freq_qubit()
freq_range = 200e6
freqs = np.arange(freq_center-freq_range/2,
freq_center+freq_range/2, 0.5e6)
if powers is None:
powers = np.arange(-40, -9, 2)
w = []
A = []
t_start = time.strftime('%Y%m%d_%H%M%S')
for i, power in enumerate(powers):
self.spec_pow(power)
self.measure_spectroscopy(freqs=freqs, analyze=False)
a = ma.Homodyne_Analysis(label=self.msmt_suffix,
fitting_model='lorentzian',
qb_name=self.name)
w.append(a.params['kappa'].value)
A.append(a.params['A'].value)
t_stop = time.strftime('%Y%m%d_%H%M%S')
Awratio = np.divide(A, w)
ind = np.argmax(Awratio)
# Should be some analysis and iterative method to find the optimum
best_spec_pow = powers[ind]
print('from find_spec_pow:' + str(best_spec_pow))
if update:
import pycqed.analysis_v2.spectroscopy_analysis as sa
a = sa.SpecPowAnalysis(t_start=t_start, t_stop=t_stop,
label='spectroscopy_'+self.msmt_suffix,
pow_key='Instrument settings.'+self.name +
'.spec_pow.value')
best_spec_pow = a.fit_res['spec_pow']
self.spec_pow(best_spec_pow)
print(self.spec_pow())
return True
###########################################################################
# Mock measurement methods
###########################################################################
def measure_with_VNA(self, start_freq, stop_freq, npts,
MC=None, name='Mock_VNA', analyze=True):
"""
Returns a SlopedHangerFuncAmplitude shaped function
"""
VNA = self.instr_VNA.get_instr()
VNA.start_frequency(start_freq)
VNA.stop_frequency(stop_freq)
VNA.npts(npts)
if MC is None:
MC = self.instr_MC.get_instr()
s = swf.None_Sweep(name='Frequency', parameter_name='Frequency',
unit='Hz')
freqs = np.linspace(VNA.start_frequency(), VNA.stop_frequency(),
VNA.npts())
f0_res = self.calculate_mock_resonator_frequency()/1e9
f0_test = self.mock_freq_test_res()/1e9
Q_test = 346215
Qe_test = 368762
Q = 290000
Qe = 255000
A = 0.37
theta = -0.1
slope = 0
f = freqs
A_res = np.abs((slope * (f / 1.e9 - f0_res) / f0_res) *
fm.HangerFuncAmplitude(f, f0_res, Q, Qe, A, theta))
A_test = np.abs((slope * (f / 1.e9 - f0_test) / f0_test) *
fm.HangerFuncAmplitude(f, f0_res, Q_test,
Qe_test, A, theta))
A_res = np.abs(A * (1. - Q / Qe * np.exp(1.j * theta) /
(1. + 2.j * Q * (f / 1.e9 - f0_res) / f0_res)))
A_test = np.abs(A * (1. - Q_test / Qe_test * np.exp(1.j * theta) /
(1. + 2.j * Q_test * (f / 1.e9 - f0_test) / f0_test)))
baseline = 0.40
A_res -= A
A_test -= A
mocked_values = A + A_res + A_test
mocked_values += np.random.normal(0, A/500, np.size(mocked_values))
d = det.Mock_Detector(value_names=['ampl'], value_units=['V'],
detector_control='soft',
mock_values=mocked_values)
MC.set_sweep_function(s)
MC.set_sweep_points(freqs)
MC.set_detector_function(d)
MC.run(name + self.msmt_suffix)
if analyze:
ma2.Basic1DAnalysis()
def measure_spectroscopy(self, freqs, mode='pulsed_marked', MC=None,
analyze=True, close_fig=True, label='',
prepare_for_continuous_wave=True):
'''
Can be made fancier by implementing different types of spectroscopy
(e.g. pulsed/CW) and by using cfg_spec_mode.
Uses a Lorentzian as the result for now.
'''
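# Note on the mocked lineshape used below: the 0-1 peak is a Lorentzian,
# peak_01 = A*(w/2)**2 / ((w/2)**2 + (freqs - f0)**2), whose height grows
# towards A0 and whose width broadens as spec_pow is raised relative to
# mock_spec_pow.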
if MC is None:
MC = self.instr_MC.get_instr()
s = swf.None_Sweep(name='Homodyne Frequency',
parameter_name='Frequency',
unit='Hz')
h = self.measurement_signal(excited=False) # Lorentzian baseline [V]
A0 = self.measurement_signal(excited=True) - h # Peak height
# Height of peak [V]
K_power = 1/np.sqrt(1+15**(-(self.spec_pow()-self.mock_spec_pow())/7))
# K_current = np.sqrt(np.abs(np.cos(2*np.pi*total_flux)))
A = K_power*A0 # K_current*
# Width of peak
wbase = 4e6
w = (wbase /
np.sqrt(0.1+10**(-(self.spec_pow()-self.mock_spec_pow()/2)/7)) +
wbase)
f0 = self.calculate_mock_qubit_frequency()
peak_01 = A*(w/2.0)**2 / ((w/2.0)**2 + ((freqs - f0))**2)
# 1-2 transition:
if self.spec_amp() > self.mock_12_spec_amp() and self.spec_pow() >= -10:
A12 = A*0.5
w12 = 1e6
f02over2 = f0 - self.mock_anharmonicity()/2
peak_02 = A12*(w12/2.0)**2 / ((w12/2.0)**2 + ((freqs-f02over2))**2)
else:
peak_02 = 0
mocked_values = h + peak_01 + peak_02
mocked_values += np.random.normal(0,
self.noise()/np.sqrt(self.ro_acq_averages()), np.size(mocked_values))
d = det.Mock_Detector(value_names=['Magnitude'], value_units=['V'],
detector_control='soft',
mock_values=mocked_values)
MC.set_sweep_function(s)
MC.set_sweep_points(freqs)
MC.set_detector_function(d)
MC.run('mock_spectroscopy_'+self.msmt_suffix+label)
if analyze:
a = ma.Homodyne_Analysis(
label=self.msmt_suffix, close_fig=close_fig)
return a.params['f0'].value
def measure_resonator_power(self, freqs, powers, MC=None,
analyze: bool = True, close_fig: bool = True,
fluxChan=None, label: str = ''):
if MC is None:
MC = self.instr_MC.get_instr()
s1 = swf.None_Sweep(name='Heterodyne Frequency',
parameter_name='Frequency',
unit='Hz')
s2 = swf.None_Sweep(name='Readout Power', parameter_name='Power',
unit='dBm')
mocked_values = []
try:
device = self.instr_device.get_instr()
qubit_names = device.qubits()
# Create linecuts:
for power in powers:
h = 10**(power/20)*10e-3
new_values = h
for name in qubit_names:
if name != 'fakequbit':
qubit = device.find_instrument(name)
f0, response = qubit.calculate_mock_resonator_response(
power, freqs)
new_values += response
mocked_values = np.concatenate([mocked_values, new_values])
except AttributeError:
logging.warning('No device found! Using this mock only for '
'resonator frequencies')
for power in powers:
h = 10**(power/20)*10e-3
new_values = h + self.calculate_mock_resonator_response(power,
freqs)
mocked_values = np.concatenate([mocked_values, new_values])
mocked_values += np.random.normal(0,
self.noise()/np.sqrt(self.ro_acq_averages()), np.size(mocked_values))
mocked_values = np.abs(mocked_values)
d = det.Mock_Detector(value_names=['Magnitude'], value_units=['V'],
detector_control='soft',
mock_values=mocked_values)
MC.set_sweep_function(s1)
MC.set_sweep_function_2D(s2)
MC.set_sweep_points(freqs)
MC.set_sweep_points_2D(powers)
MC.set_detector_function(d)
MC.run('Resonator_power_scan'+self.msmt_suffix + label, mode='2D')
if analyze:
# ma.TwoD_Analysis(label='Resonator_power_scan',
# close_fig=close_fig, normalize=True)
a = ma.Resonator_Powerscan_Analysis(label='Resonator_power_scan',
close_fig=True)
return a
def measure_heterodyne_spectroscopy(self, freqs, MC=None, analyze=True,
close_fig=True, label=''):
'''
For finding resonator frequencies. Uses a Lorentzian fit for now; this might
be extended in the future.
Dependent on readout power: too high a power will yield the wrong frequency.
'''
if MC is None:
MC = self.instr_MC.get_instr()
s = swf.None_Sweep(name='Heterodyne Frequency',
parameter_name='Frequency',
unit='Hz')
# Mock Values, matches resonator power scan:
# Find all resonator frequencies:
power = 20*np.log10(self.ro_pulse_amp_CW())
dips = []
try:
device = self.instr_device.get_instr()
qubit_names = device.qubits()
for name in qubit_names:
if name != 'fakequbit':
qubit = device.find_instrument(name)
freq, dip = qubit.calculate_mock_resonator_response(power,
freqs)
dips.append(dip)
except AttributeError:
logging.warning('No device found! Using this mock only for '
'resonator frequencies')
freq, dips = self.calculate_mock_resonator_response(power,
freqs)
h = 10**(power/20)*10e-3
mocked_values = h
for dip in dips:
mocked_values += dip
mocked_values += np.random.normal(0,
self.noise()/np.sqrt(self.ro_acq_averages()),
np.size(mocked_values))
d = det.Mock_Detector(value_names=['Magnitude'], value_units=['V'],
detector_control='soft',
mock_values=mocked_values)
MC.set_sweep_function(s)
MC.set_sweep_points(freqs)
MC.set_detector_function(d)
MC.run('Resonator_scan'+self.msmt_suffix+label)
if analyze:
ma.Homodyne_Analysis(label='Resonator_scan', close_fig=close_fig)
def measure_qubit_frequency_dac_scan(self, freqs, dac_values,
MC=None, analyze=True, fluxChan=None,
close_fig=True, mode='pulsed_marked',
nested_resonator_calibration=False,
resonator_freqs=None):
if MC is None:
MC = self.instr_MC.get_instr()
# Assume flux is controlled by SPI rack
fluxcurrent = self.instr_FluxCtrl.get_instr()
if fluxChan is None:
fluxChan = self.fl_dc_ch()
s1 = swf.None_Sweep(name='Frequency', parameter_name='Frequency',
unit='Hz')
s2 = swf.None_Sweep(name='Flux', parameter_name=fluxChan, unit='A')
mocked_values = []
for dac_value in dac_values:
fluxcurrent[fluxChan](dac_value)
total_flux = self.calculate_mock_flux()
h = self.measurement_signal(excited=False)
A0 = self.measurement_signal(excited=True) - h
K_current = np.sqrt(np.abs(np.cos(2*np.pi*total_flux)))
K_power = 1 / \
np.sqrt(1+15**(-(self.spec_pow()-self.mock_spec_pow())/7))
A = K_current*K_power*A0 # Height of peak [V]
wbase = 4e6
w = wbase/np.sqrt(0.1+10**(-(self.spec_pow() -
self.mock_spec_pow()/2)/7)) + wbase
f0 = self.calculate_mock_qubit_frequency()
new_values = h + A*(w/2.0)**2 / ((w/2.0)**2 +
((freqs - f0))**2)
new_values += np.random.normal(0, self.noise()/np.sqrt(self.ro_acq_averages()),
np.size(new_values))
mocked_values = np.concatenate([mocked_values, new_values])
d = det.Mock_Detector(value_names=['Magnitude'], value_units=['V'],
detector_control='soft',
mock_values=mocked_values)
MC.set_sweep_function(s1)
MC.set_sweep_function_2D(s2)
MC.set_sweep_points(freqs)
MC.set_sweep_points_2D(dac_values)
MC.set_detector_function(d)
t_start = time.strftime('%Y%m%d_%H%M%S')
MC.run(name='Qubit_dac_scan'+self.msmt_suffix, mode='2D')
if analyze:
ma.TwoD_Analysis(label='Qubit_dac_scan', close_fig=close_fig)
timestamp = a_tools.get_timestamps_in_range(t_start,
label='Qubit_dac_scan' +
self.msmt_suffix)
timestamp = timestamp[0]
ma2.da.DAC_analysis(timestamp=timestamp)
def measure_resonator_frequency_dac_scan(self, freqs, dac_values, fluxChan,
pulsed=True, MC=None,
analyze=True, close_fig=True,
nested_resonator_calibration=False,
resonator_freqs=None, label=''):
'''
Measures resonator frequency versus flux. A simple model that just
shifts the Lorentzian peak by 2 MHz with a cosine.
'''
if MC is None:
MC = self.instr_MC.get_instr()
s1 = swf.None_Sweep(name='Heterodyne Frequency',
parameter_name='Frequency',
unit='Hz')
s2 = swf.None_Sweep(name='Flux', parameter_name=fluxChan,
unit='A')
fluxcurrent = self.instr_FluxCtrl.get_instr()
power = 20*np.log10(self.ro_pulse_amp_CW())
mocked_values = []
try:
device = self.instr_device.get_instr()
qubit_names = device.qubits()
# Create linecuts:
for dac_value in dac_values:
fluxcurrent[fluxChan](dac_value)
h = 10**(power/20)*10e-3
new_values = h
for name in qubit_names:
if name != 'fakequbit':
qubit = device.find_instrument(name)
f0, response = qubit.calculate_mock_resonator_response(
power, freqs)
new_values += response
mocked_values = np.concatenate([mocked_values, new_values])
except AttributeError:
logging.warning('No device found! Using this mock only for '
'resonator frequencies')
for dac_value in dac_values:
fluxcurrent[fluxChan](dac_value)
h = 10**(power/20)*10e-3
new_values = h + self.calculate_mock_resonator_response(power,
freqs)
mocked_values = np.concatenate([mocked_values, new_values])
mocked_values += np.random.normal(0,
self.noise()/np.sqrt(self.ro_acq_averages()),
np.size(mocked_values))
mocked_values = np.abs(mocked_values)
d = det.Mock_Detector(value_names=['Magnitude'], value_units=['V'],
detector_control='soft',
mock_values=mocked_values)
MC.set_sweep_function(s1)
MC.set_sweep_function_2D(s2)
MC.set_sweep_points(freqs)
MC.set_sweep_points_2D(dac_values)
MC.set_detector_function(d)
MC.run('Resonator_dac_scan'+self.msmt_suffix+label, mode='2D')
if analyze:
ma.TwoD_Analysis(label='Resonator_dac_scan', close_fig=close_fig,
normalize=False)
import pycqed.analysis_v2.dac_scan_analysis as dsa
dsa.Susceptibility_to_Flux_Bias(label='Resonator_dac_scan')
def measure_rabi(self, MC=None, amps=None,
analyze=True, close_fig=True, real_imag=True,
prepare_for_timedomain=True, all_modules=False):
"""
Measurement is the same with and without a VSM; therefore there is only
one measurement method rather than two. In
calibrate_mw_pulse_amp_coarse, the required parameter is updated.
"""
if MC is None:
MC = self.instr_MC.get_instr()
if amps is None:
amps = np.linspace(0.1, 1, 31)
s = swf.None_Sweep(name='Channel Amplitude',
parameter_name='mw channel amp',
unit='a.u.')
f_rabi = 1/self.mock_mw_amp180()
low_lvl = self.measurement_signal(excited=False)
high_lvl = self.measurement_signal(excited=True)
freq_qubit = self.calculate_mock_qubit_frequency()
detuning = np.abs(self.freq_qubit() - freq_qubit)/1e6
highlow = (high_lvl-low_lvl)*np.exp(-detuning)
high_lvl = low_lvl + highlow
signal_amp = (high_lvl - low_lvl)/2
offset = (high_lvl + low_lvl)/2
mocked_values = offset + signal_amp*np.cos(np.pi*f_rabi*amps)
mocked_values = self.values_to_IQ(mocked_values) # This adds noise too
d = det.Mock_Detector(value_names=['raw w0', 'raw w1'],
value_units=['V', 'V'], detector_control='soft',
mock_values=mocked_values)
MC.set_sweep_function(s)
MC.set_sweep_points(amps)
MC.set_detector_function(d)
MC.run('mock_rabi'+self.msmt_suffix)
a = ma.Rabi_Analysis(label='rabi_', close_fig=close_fig)
return a.rabi_amplitudes['piPulse']
def measure_ramsey(self, times=None, MC=None,
artificial_detuning: float = None,
freq_qubit: float = None, label: str = '',
prepare_for_timedomain=True, analyze=True,
close_fig=True, update=True, detector=False,
double_fit=False):
if MC is None:
MC = self.instr_MC.get_instr()
if times is None:
stepsize = (self.T2_star()*4/61)//(abs(self.cfg_cycle_time())) \
* abs(self.cfg_cycle_time())
times = np.arange(0, self.T2_star()*4, stepsize)
if artificial_detuning is None:
artificial_detuning = 3/times[-1]
# Calibration points:
dt = times[1] - times[0]
times = np.concatenate([times,
(times[-1]+1*dt,
times[-1]+2*dt,
times[-1]+3*dt,
times[-1]+4*dt)])
# if prepare_for_timedomain:
# self.prepare_for_timedomain()
if freq_qubit is None:
freq_qubit = self.freq_qubit()
self.instr_LO_mw.get_instr().set('frequency', freq_qubit -
self.mw_freq_mod.get() +
artificial_detuning)
s = swf.None_Sweep(name='T2_star', parameter_name='Time',
unit='s')
low_lvl = self.measurement_signal(excited=False)
high_lvl = self.measurement_signal(excited=True)
mock_freq_qubit = self.calculate_mock_qubit_frequency()
detuning = np.abs(self.freq_qubit() - freq_qubit)/1e6
highlow = (high_lvl-low_lvl)*np.exp(-detuning)
high_lvl = low_lvl + highlow
signal_amp = (high_lvl - low_lvl)/2
offset = (high_lvl + low_lvl)/2
phase = 0
oscillation_offset = 0
exponential_offset = offset
frequency = freq_qubit - mock_freq_qubit + artificial_detuning
# Mock values without calibration points
mocked_values = (signal_amp *
np.exp(-(times[0:-4] / self.mock_T2_star())) *
(np.cos(2*np.pi*frequency*times[0:-4] + phase) +
oscillation_offset) + exponential_offset)
# Calibration points
mocked_values = np.concatenate([mocked_values,
low_lvl, low_lvl, high_lvl, high_lvl])
# Add noise:
mocked_values += np.random.normal(0,
self.noise()/np.sqrt(self.ro_acq_averages()),
np.size(mocked_values))
mocked_values = self.values_to_IQ(mocked_values)
d = det.Mock_Detector(value_names=['raw w1', 'raw w0'],
value_units=['V', 'V'],
detector_control='soft',
mock_values=mocked_values)
MC.set_sweep_function(s)
MC.set_sweep_points(times)
MC.set_detector_function(d)
MC.run('mock_Ramsey' + self.msmt_suffix)
if analyze:
a = ma.Ramsey_Analysis(auto=True, closefig=True,
freq_qubit=freq_qubit,
artificial_detuning=artificial_detuning)
if update:
self.T2_star(a.T2_star['T2_star'])
if double_fit:
b = ma.DoubleFrequency()
res = {
'T2star1': b.tau1,
'T2star2': b.tau2,
'frequency1': b.f1,
'frequency2': b.f2
}
return res
else:
res = {
'T2star': a.T2_star['T2_star'],
'frequency': a.qubit_frequency,
}
return res
def measure_echo(self, times=None, MC=None, analyze=True, close_fig=True,
update=True, label: str = ''):
if MC is None:
MC = self.instr_MC.get_instr()
if times is None:
stepsize = (self.T2_echo()*2/61)//(abs(self.cfg_cycle_time())) \
* abs(self.cfg_cycle_time())
times = np.arange(0, self.T2_echo()*4, stepsize*2)
dt = times[1] - times[0]
times = np.concatenate([times,
(times[-1]+1*dt,
times[-1]+2*dt,
times[-1]+3*dt,
times[-1]+4*dt)])
s = swf.None_Sweep(parameter_name='Time', unit='s')
low_lvl = self.measurement_signal(excited=False)
high_lvl = self.measurement_signal(excited=True)
freq_qubit = self.calculate_mock_qubit_frequency()
detuning = np.abs(self.freq_qubit() - freq_qubit)/1e6
highlow = (high_lvl-low_lvl)*np.exp(-detuning)
high_lvl = low_lvl + highlow
signal_amp = (high_lvl - low_lvl)/2
offset = (high_lvl + low_lvl)/2
phase = np.pi
oscillation_offset = 0
exponential_offset = offset
frequency = 4/times[-1] # 4 oscillations
# Mock values without calibration points
mocked_values = (signal_amp *
np.exp(-(times[0:-4] / self.mock_T2_echo())) *
(np.cos(2*np.pi*frequency*times[0:-4] + phase) +
oscillation_offset) + exponential_offset)
mocked_values = self.values_to_IQ(mocked_values)
d = det.Mock_Detector(value_names=['raw w1', 'raw w0'],
value_units=['V', 'V'],
detector_control='soft',
mock_values=mocked_values)
MC.set_sweep_function(s)
MC.set_sweep_points(times)
MC.set_detector_function(d)
MC.run('mock_echo' + self.msmt_suffix)
if analyze:
# N.B. v1.5 analysis
a = ma.Echo_analysis_V15(label='echo', auto=True, close_fig=True)
if update:
self.T2_echo(a.fit_res.params['tau'].value)
return a.fit_res.params['tau'].value
def measure_T1(self, times=None, MC=None, analyze=True, close_fig=True,
update=True, prepare_for_timedomain=True):
'''
Very simple version that just returns an exponential decay based on
mock_T1. Might be improved by making it depend on how close your
pulse amplitude is to the calibrated value.
'''
if MC is None:
MC = self.instr_MC.get_instr()
if times is None:
times = np.linspace(0, self.T1()*4, 31)
# Calibration points
dt = times[1] - times[0]
times = np.concatenate([times,
(times[-1]+1*dt,
times[-1]+2*dt,
times[-1]+3*dt,
times[-1]+4*dt)])
s = swf.None_Sweep(parameter_name='Time', unit='s')
low_lvl = self.measurement_signal(excited=False)
high_lvl = self.measurement_signal(excited=True)
freq_qubit = self.calculate_mock_qubit_frequency()
detuning = np.abs(self.freq_qubit() - freq_qubit)/1e6
highlow = (high_lvl-low_lvl)*np.exp(-detuning)
high_lvl = low_lvl + highlow
amplitude = high_lvl - low_lvl
mocked_values = amplitude*np.exp(-(times[0:-4]/self.mock_T1()))+low_lvl
mocked_values = np.concatenate(
[mocked_values, low_lvl, low_lvl, high_lvl, high_lvl])
mocked_values = self.values_to_IQ(mocked_values)
d = det.Mock_Detector(value_names=['raw w0', 'raw w1'],
value_units=['V', 'V'], detector_control='soft',
mock_values=mocked_values)
MC.set_sweep_function(s)
MC.set_sweep_points(times)
MC.set_detector_function(d)
MC.run('mock_T1'+self.msmt_suffix)
if analyze:
a = ma.T1_Analysis(auto=True, close_fig=True)
if update:
self.T1(a.T1)
return a.T1
def measure_ALLXY(self, MC=None, label: str = '', analyze=True,
close_fig=True):
"""
NOT IMPLEMENTED YET
"""
if MC is None:
MC = self.instr_MC.get_instr()
if analyze:
a = ma.ALLXY_Analysis(close_main_fig=close_fig)
return a.deviation_total
def measurement_signal(self, excited=False):
'''
Returns the readout signal level, depending on the readout frequency and
resonator frequency.
The 'excited' parameter indicates whether the qubit is in the excited
state, which results in a 2 chi shift of the resonator.
'''
power = 20*np.log10(self.ro_pulse_amp_CW())
f_ro = self.ro_freq()
h = 10**(power/20)*10e-3 # Lorentzian baseline [V]
f0, dip = self.calculate_mock_resonator_response(power, np.array([f_ro]),
excited=excited)
signal = h + dip
if type(signal) is list:
signal = signal[0]
return signal
def values_to_IQ(self, mocked_values, theta=15):
theta = theta * np.pi/180
MockI = 1/np.sqrt(2)*np.real(mocked_values*np.exp(1j*theta))
MockQ = 1/np.sqrt(2)*np.real(mocked_values*np.exp(1j*(theta-np.pi/2)))
IQ_values = []
for I, Q in zip(MockI, MockQ):
I += np.random.normal(0, self.noise() /
np.sqrt(self.ro_acq_averages()), 1)
Q += np.random.normal(0, self.noise() /
np.sqrt(self.ro_acq_averages()), 1)
IQ_values.append([I, Q])
return IQ_values
def calculate_f_qubit_from_power_scan(self, f_bare, f_shifted,
g_coupling=65e6, RWA=False):
'''
Inputs are in Hz
f_bare: the resonator frequency without a coupled qubit
f_shifted: the resonator frequency shifted due to coupling to a qubit
g_coupling: the coupling strength
Output:
f_q: in Hz
'''
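# Sketch of the relation being inverted (with w = 2*pi*f): the code below
# assumes a dispersive resonator pull of
#   w_shift - w_r = g**2 * 2*w_q / (w_r**2 - w_q**2)   (non-RWA branch)
#   w_shift - w_r = g**2 / (w_r - w_q)                  (RWA branch)
# and solves these for w_q given the measured shift.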
w_r = f_bare * 2 * np.pi
w_shift = f_shifted * 2*np.pi
g = 2*np.pi * g_coupling
shift = (w_shift - w_r)/g**2
# f_shift > 0 when f_qubit<f_res
# For the non-RWA result (only dispersive approximation)
if (RWA is False):
w_q = -1/(shift) + np.sqrt(1/(shift**2)+w_r**2)
# For the RWA approximation
else:
w_q = -1/shift + w_r
return w_q/(2.*np.pi)
def calculate_g_coupling_from_frequency_shift(self, f_bare, f_shifted,
f_qubit):
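# Inverse problem of the method above: given the measured pull
# f_shifted - f_bare and a known qubit frequency, estimate the coupling via
#   g = sqrt(|delta_w / (1/(w_q - w_r) + 1/(w_q + w_r))|) / (2*pi)
# Illustrative call (made-up numbers, 'qubit' being some instance):
#   g = qubit.calculate_g_coupling_from_frequency_shift(7.49e9, 7.4915e9, 6.0e9)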
w_r = 2*np.pi * f_bare
w_shift = 2*np.pi * f_shifted
w_q = 2*np.pi*f_qubit
shift = w_shift-w_r
rhs = 1./(w_q-w_r) + 1./(w_q+w_r)
# rhs_RWA = 1./(w_q-w_r)
return np.sqrt(np.abs(shift/rhs))/(2*np.pi)
def calculate_mock_flux(self):
"""
Calculates total flux through SQUID loop by a weighted sum of all
contributions from all FBLs, and subtracting the sweetspot flux.
"""
fluxcurrent = self.instr_FluxCtrl.get_instr()
flux = 0
for FBL in fluxcurrent.channel_map:
current = fluxcurrent[FBL]()
flux += current/self.mock_fl_dc_I_per_phi0()[FBL]
flux -= self.mock_sweetspot_phi_over_phi0()
return flux
def calculate_mock_qubit_frequency(self):
'''
Cleaner way of calculating the qubit frequency, depending on:
- Flux (current)
- Ec, EJ
- Chi01
'''
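# The body below uses the standard asymmetric-transmon approximation:
#   f_q ~ sqrt(8 * Ec * Ej_eff) - Ec + chi01, with
#   Ej_eff = (Ej1 + Ej2) * |cos(pi*phi/phi0)| * sqrt(1 + d**2 * tan(pi*phi/phi0)**2)
#   and d = (alpha - 1)/(alpha + 1), alpha = max(Ej1, Ej2)/min(Ej1, Ej2).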
phi_over_phi0 = self.calculate_mock_flux()
if self.mock_Ej1() > self.mock_Ej2():
alpha = self.mock_Ej1()/self.mock_Ej2()
else:
alpha = self.mock_Ej2()/self.mock_Ej1()
d = (alpha-1)/(alpha+1)
Ej_sum = self.mock_Ej1() + self.mock_Ej2()
Ej_eff = np.abs(Ej_sum*np.cos(np.pi*phi_over_phi0) *
np.sqrt(1 + d**2 * (np.tan(np.pi*phi_over_phi0))**2))
f_qubit = (np.sqrt(8*self.mock_Ec()*Ej_eff) -
self.mock_Ec() + self.mock_chi01())
return f_qubit
def calculate_mock_resonator_response(self, power, freqs, excited=False):
"""
Cleaner way of calculating resonator frequency, depending on power etc.
Makes it easier to use a mock device with multiple resonators.
Returns the resonant frequency and a Lorentzian response combining both the
test and qubit resonators.
TODO: Make hanger instead of Lorentzian
"""
res_power = 20*np.log10(self.mock_ro_pulse_amp_CW())
pow_shift = self.mock_pow_shift()
h = 10**(power/20)*10e-3 # Lorentzian baseline [V]
Q = self.mock_Q()
Qe = self.mock_Qe()
theta = self.mock_theta()
slope = self.mock_slope()
phi_I = self.mock_phi_I()
phi_0 = self.mock_phi_0()
Q_test = self.mock_test_Q()
Qe_test = self.mock_test_Qe()
theta_test = self.mock_test_theta()
slope_test = self.mock_test_slope()
phi_I_test = self.mock_test_phi_I()
phi_0_test = self.mock_test_phi_0()
if power <= res_power:
# Good signal
f0 = self.calculate_mock_resonator_frequency(excited=excited)
res_qubit_dip = fm.hanger_func_complex_SI(freqs, f0, Q, Qe, h,
theta, phi_I, phi_0,
slope=slope)
elif (power > res_power) and (power < res_power + pow_shift):
# Noisy regime -> width increases, peak decreases
f0 = self.calculate_mock_resonator_frequency(excited=excited)
f0_high = self.mock_freq_res_bare()
f_shift = f0 - f0_high
f0 = f0 - ((power - res_power)/pow_shift)*f_shift
Q_decrease = (1+(power-res_power)/pow_shift*10)
Q_nonlinear = Q # /Q_decrease
Qe_nonlinear = Qe # /Q_decrease
res_qubit_dip = fm.hanger_func_complex_SI(freqs, f0, Q_nonlinear,
Qe_nonlinear, h,
theta, phi_I, phi_0,
slope=slope) - h
res_qubit_dip = res_qubit_dip/Q_decrease
res_qubit_dip += h
# Add some extra noise
for i, value in enumerate(res_qubit_dip):
d = np.abs(value - h)
value += np.random.normal(0,
self.noise() /
self.ro_acq_averages()*10*Q_decrease,
1)
res_qubit_dip[i] = value
else:
# High power regime
f0 = self.mock_freq_res_bare()
res_qubit_dip = fm.hanger_func_complex_SI(freqs, f0, Q, Qe, h,
theta, phi_I, phi_0,
slope=slope)
if self.mock_freq_test_res() is not None:
f0_test_res = self.mock_freq_test_res()
test_res_response = fm.hanger_func_complex_SI(freqs, f0_test_res,
Q_test, Qe_test, h,
theta_test,
phi_I_test,
phi_0_test,
slope=slope_test)
else:
test_res_response = np.ones(np.size(freqs))*h
response = np.real(res_qubit_dip - h + test_res_response - h)
return f0, response
def calculate_mock_resonator_frequency(self, excited=False):
"""
Calculates the resonator frequency depending on flux etc.
"""
freq_qubit_12 = self.calculate_mock_qubit_frequency() + self.mock_Ec()
freq_qubit = self.calculate_mock_qubit_frequency()
chi01 = self.mock_coupling01()**2 / (freq_qubit -
self.mock_freq_res_bare())
chi12 = self.mock_coupling12()**2 / (freq_qubit_12 -
self.mock_freq_res_bare())
if excited:
chi = chi01 - chi12/2
else:
chi = 0
f0 = self.mock_freq_res_bare() - chi12/2 - 2*chi
return f0
###########################################################################
# AutoDepGraph
###########################################################################
def dep_graph(self):
dag = AutoDepGraph_DAG('DAG')
cal_True_delayed = 'autodepgraph.node_functions.calibration_functions.test_calibration_True_delayed'
dag.add_node('Resonators Wide Search',
calibrate_function=self.name + '.find_resonators')
dag.add_node('Zoom on resonators',
calibrate_function=self.name + '.find_resonator_frequency_initial')
dag.add_node('Resonators Power Scan',
calibrate_function=self.name + '.find_test_resonators')
dag.add_node('Resonators Flux Sweep',
calibrate_function=self.name + '.find_qubit_resonator_fluxline')
dag.add_node(self.name + ' Resonator Frequency',
calibrate_function=self.name + '.find_resonator_frequency')
dag.add_node(self.name + ' Resonator Power Scan',
calibrate_function=self.name + '.calibrate_ro_pulse_amp_CW')
# Calibration of instruments and ro
# dag.add_node(self.name + ' Calibrations',
# calibrate_function=cal_True_delayed)
# dag.add_node(self.name + ' Mixer Skewness',
# calibrate_function=self.name + '.calibrate_mixer_skewness_drive')
# dag.add_node(self.name + ' Mixer Offset Drive',
# calibrate_function=self.name + '.calibrate_mixer_offsets_drive')
# dag.add_node(self.name + ' Mixer Offset Readout',
# calibrate_function=self.name + '.calibrate_mixer_offsets_RO')
# dag.add_node(self.name + ' Ro/MW pulse timing',
# calibrate_function=cal_True_delayed)
# dag.add_node(self.name + ' Ro Pulse Amplitude',
# calibrate_function=self.name + '.ro_pulse_amp_CW')
# Qubits calibration
dag.add_node(self.name + ' Frequency Coarse',
calibrate_function=self.name + '.find_frequency',
check_function=self.name + '.check_qubit_spectroscopy',
tolerance=0.2e-3)
dag.add_node(self.name + ' Frequency at Sweetspot',
calibrate_function=self.name + '.find_frequency')
dag.add_node(self.name + ' Spectroscopy Power',
calibrate_function=self.name + '.calibrate_spec_pow')
dag.add_node(self.name + ' Sweetspot',
calibrate_function=self.name + '.find_qubit_sweetspot')
dag.add_node(self.name + ' Rabi',
calibrate_function=self.name + '.calibrate_mw_pulse_amplitude_coarse',
check_function=self.name + '.check_rabi',
tolerance=0.01)
dag.add_node(self.name + ' Frequency Fine',
calibrate_function=self.name + '.calibrate_frequency_ramsey',
check_function=self.name + '.check_ramsey',
tolerance=0.1e-3)
# Validate qubit calibration
dag.add_node(self.name + ' ALLXY',
calibrate_function=self.name + '.measure_allxy')
dag.add_node(self.name + ' MOTZOI Calibration',
calibrate_function=self.name + '.calibrate_motzoi')
# If all goes well, the qubit is fully 'calibrated' and can be controlled
# Qubits measurements
dag.add_node(self.name + ' Anharmonicity')
dag.add_node(self.name + ' Avoided Crossing')
dag.add_node(self.name + ' T1')
dag.add_node(self.name + ' T1(time)')
dag.add_node(self.name + ' T1(frequency)')
dag.add_node(self.name + ' T2_Echo')
dag.add_node(self.name + ' T2_Echo(time)')
dag.add_node(self.name + ' T2_Echo(frequency)')
dag.add_node(self.name + ' T2_Star')
dag.add_node(self.name + ' T2_Star(time)')
dag.add_node(self.name + ' T2_Star(frequency)')
#######################################################################
# EDGES
#######################################################################
# VNA
dag.add_edge('Zoom on resonators', 'Resonators Wide Search')
dag.add_edge('Resonators Power Scan',
'Zoom on resonators')
dag.add_edge('Resonators Flux Sweep',
'Zoom on resonators')
dag.add_edge('Resonators Flux Sweep',
'Resonators Power Scan')
# Resonators
dag.add_edge(self.name + ' Resonator Frequency',
'Resonators Power Scan')
dag.add_edge(self.name + ' Resonator Frequency',
'Resonators Flux Sweep')
dag.add_edge(self.name + ' Resonator Power Scan',
self.name + ' Resonator Frequency')
dag.add_edge(self.name + ' Frequency Coarse',
self.name + ' Resonator Power Scan')
# Qubit Calibrations
dag.add_edge(self.name + ' Frequency Coarse',
self.name + ' Resonator Frequency')
# dag.add_edge(self.name + ' Frequency Coarse',
# self.name + ' Calibrations')
# Calibrations
# dag.add_edge(self.name + ' Calibrations',
# self.name + ' Mixer Skewness')
# dag.add_edge(self.name + ' Calibrations',
# self.name + ' Mixer Offset Drive')
# dag.add_edge(self.name + ' Calibrations',
# self.name + ' Mixer Offset Readout')
# dag.add_edge(self.name + ' Calibrations',
# self.name + ' Ro/MW pulse timing')
# dag.add_edge(self.name + ' Calibrations',
# self.name + ' Ro Pulse Amplitude')
# Qubit
dag.add_edge(self.name + ' Spectroscopy Power',
self.name + ' Frequency Coarse')
dag.add_edge(self.name + ' Sweetspot',
self.name + ' Frequency Coarse')
dag.add_edge(self.name + ' Sweetspot',
self.name + ' Spectroscopy Power')
dag.add_edge(self.name + ' Rabi',
self.name + ' Frequency at Sweetspot')
dag.add_edge(self.name + ' Frequency Fine',
self.name + ' Frequency at Sweetspot')
dag.add_edge(self.name + ' Frequency Fine',
self.name + ' Rabi')
dag.add_edge(self.name + ' Frequency at Sweetspot',
self.name + ' Sweetspot')
dag.add_edge(self.name + ' ALLXY',
self.name + ' Rabi')
dag.add_edge(self.name + ' ALLXY',
self.name + ' Frequency Fine')
dag.add_edge(self.name + ' ALLXY',
self.name + ' MOTZOI Calibration')
# Perform initial measurements to see if they make sense
dag.add_edge(self.name + ' T1',
self.name + ' ALLXY')
dag.add_edge(self.name + ' T2_Echo',
self.name + ' ALLXY')
dag.add_edge(self.name + ' T2_Star',
self.name + ' ALLXY')
# Measure as function of frequency and time
dag.add_edge(self.name + ' T1(frequency)',
self.name + ' T1')
dag.add_edge(self.name + ' T1(time)',
self.name + ' T1')
dag.add_edge(self.name + ' T2_Echo(frequency)',
self.name + ' T2_Echo')
dag.add_edge(self.name + ' T2_Echo(time)',
self.name + ' T2_Echo')
dag.add_edge(self.name + ' T2_Star(frequency)',
self.name + ' T2_Star')
dag.add_edge(self.name + ' T2_Star(time)',
self.name + ' T2_Star')
dag.add_edge(self.name + ' DAC Arc Polynomial',
self.name + ' Frequency at Sweetspot')
# Measurements of anharmonicity and avoided crossing
dag.add_edge(self.name + ' f_12 estimate',
self.name + ' Frequency at Sweetspot')
dag.add_edge(self.name + ' Anharmonicity',
self.name + ' f_12 estimate')
dag.add_edge(self.name + ' Avoided Crossing',
self.name + ' DAC Arc Polynomial')
dag.cfg_plot_mode = 'svg'
dag.update_monitor()
dag.cfg_svg_filename
url = dag.open_html_viewer()
print('Dependency Graph Created. URL = '+url)
self._dag = dag
return dag
# Create folder
# datadir = r'D:\GitHubRepos\PycQED_py3\pycqed\tests\test_output'
# datestr = time.strftime('%Y%m%d')
# timestr = time.strftime('%H%M%S')
# folder_name = '_Dep_Graph_Maintenance'
# try:
# os.mkdir(datadir + '\\' + datestr + '\\' + timestr + folder_name)
# except FileExistsError:
# pass
# def show_dep_graph(self, dag):
# dag.cfg_plot_mode = 'svg'
# dag.update_monitor()
# dag.cfg_svg_filename
# url = dag.open_html_viewer()
# print(url)
| mit |
OpringaoDoTurno/airflow | airflow/www/views.py | 4 | 99015 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from past.builtins import basestring, unicode
import ast
import datetime as dt
import logging
import os
import pkg_resources
import socket
from functools import wraps
from datetime import timedelta
import copy
import math
import json
import bleach
import pendulum
from collections import defaultdict
import inspect
from textwrap import dedent
import traceback
import sqlalchemy as sqla
from sqlalchemy import or_, desc, and_, union_all
from flask import (
abort, redirect, url_for, request, Markup, Response, current_app, render_template,
make_response)
from flask_admin import BaseView, expose, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.actions import action
from flask_admin.babel import lazy_gettext
from flask_admin.tools import iterdecode
from flask_login import flash
from flask._compat import PY2
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2 import escape
import markdown
import nvd3
from wtforms import (
Form, SelectField, TextAreaField, PasswordField,
StringField, validators)
from flask_admin.form.fields import DateTimeField
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import airflow
from airflow import configuration as conf
from airflow import models
from airflow import settings
from airflow.api.common.experimental.mark_tasks import set_dag_run_state
from airflow.exceptions import AirflowException
from airflow.settings import Session
from airflow.models import XCom, DagRun
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS
from airflow.models import BaseOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.utils import timezone
from airflow.utils.json import json_ser
from airflow.utils.state import State
from airflow.utils.db import create_session, provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils.dates import infer_time_unit, scale_time_units, parse_execution_date
from airflow.utils.timezone import datetime
from airflow.www import utils as wwwutils
from airflow.www.forms import DateTimeForm, DateTimeWithNumRunsForm
from airflow.www.validators import GreaterEqualThan
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
dagbag = models.DagBag(settings.DAGS_FOLDER)
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user
FILTER_BY_OWNER = False
PAGE_SIZE = conf.getint('webserver', 'page_size')
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
# filter_by_owner if authentication is enabled and filter_by_owner is true
FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
def dag_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
url = url_for(
'airflow.graph',
dag_id=dag_id,
execution_date=m.execution_date)
return Markup(
'<a href="{}">{}</a>'.format(url, dag_id))
def log_url_formatter(v, c, m, p):
return Markup(
'<a href="{m.log_url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def dag_run_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
url = url_for(
'airflow.graph',
dag_id=m.dag_id,
run_id=m.run_id,
execution_date=m.execution_date)
return Markup('<a href="{url}">{m.run_id}</a>'.format(**locals()))
def task_instance_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
task_id = bleach.clean(m.task_id)
url = url_for(
'airflow.task',
dag_id=dag_id,
task_id=task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=dag_id,
root=task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_token(state):
color = State.color(state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{state}</span>'.format(**locals()))
def parse_datetime_f(value):
if not isinstance(value, dt.datetime):
return value
return timezone.make_aware(value)
def state_f(v, c, m, p):
return state_token(m.state)
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if timezone.utcnow().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
def label_link(v, c, m, p):
try:
default_params = ast.literal_eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def render(obj, lexer):
out = ""
if isinstance(obj, basestring):
out += pygment_html_render(obj, lexer)
elif isinstance(obj, (tuple, list)):
for i, s in enumerate(obj):
out += "<div>List item #{}</div>".format(i)
out += "<div>" + pygment_html_render(s, lexer) + "</div>"
elif isinstance(obj, dict):
for k, v in obj.items():
out += '<div>Dict item "{}"</div>'.format(k)
out += "<div>" + pygment_html_render(v, lexer) + "</div>"
return out
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: render(x, lexers.BashLexer),
'hql': lambda x: render(x, lexers.SqlLexer),
'sql': lambda x: render(x, lexers.SqlLexer),
'doc': lambda x: render(x, lexers.TextLexer),
'doc_json': lambda x: render(x, lexers.JsonLexer),
'doc_rst': lambda x: render(x, lexers.RstLexer),
'doc_yaml': lambda x: render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: render(
inspect.getsource(x), lexers.PythonLexer),
}
def data_profiling_required(f):
"""Decorator for views requiring data profiling access"""
@wraps(f)
def decorated_function(*args, **kwargs):
if (
current_app.config['LOGIN_DISABLED'] or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):
if isinstance(tasks, list):
for task in tasks:
recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
return
if isinstance(tasks, SubDagOperator):
subtasks = tasks.subdag.tasks
dag_ids.append(tasks.subdag.dag_id)
for subtask in subtasks:
if subtask.task_id not in task_ids:
task_ids.append(subtask.task_id)
task_id_to_dag[subtask.task_id] = tasks.subdag
recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)
if isinstance(tasks, BaseOperator):
task_id_to_dag[tasks.task_id] = tasks.dag
def get_chart_height(dag):
"""
TODO(aoen): See [AIRFLOW-1263] We use the number of tasks in the DAG as a heuristic to
approximate the size of generated chart (otherwise the charts are tiny and unreadable
when DAGs have a large number of tasks). Ideally nvd3 should allow for dynamic-height
charts, that is charts that take up space based on the size of the components within.
"""
return 600 + len(dag.tasks) * 10
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
from airflow import macros
import pandas as pd
if conf.getboolean('core', 'secure_mode'):
abort(404)
with create_session() as session:
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).first()
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).first()
payload = {
"state": "ERROR",
"error": ""
}
# Processing templated fields
try:
args = ast.literal_eval(chart.default_params)
if type(args) is not dict:
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
args.update(request_dict)
args['macros'] = macros
sandbox = ImmutableSandboxedEnvironment()
sql = sandbox.from_string(chart.sql).render(**args)
label = sandbox.from_string(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(
wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart.show_datatable or chart_type == "datatable":
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
payload['data'] = data
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
except Exception as e:
payload['error'] = "Time conversion failed"
if chart_type == 'datatable':
payload['state'] = 'SUCCESS'
return wwwutils.json_response(payload)
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
df = df.fillna(0)
NVd3ChartClass = chart_mapping.get(chart.chart_type)
NVd3ChartClass = getattr(nvd3, NVd3ChartClass)
nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)
for col in df.columns:
nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())
try:
nvd3_chart.buildcontent()
payload['chart_type'] = nvd3_chart.__class__.__name__
payload['htmlcontent'] = nvd3_chart.htmlcontent
except Exception as e:
payload['error'] = str(e)
payload['state'] = 'SUCCESS'
payload['request_dict'] = request_dict
return wwwutils.json_response(payload)
@expose('/chart')
@data_profiling_required
def chart(self):
if conf.getboolean('core', 'secure_mode'):
abort(404)
with create_session() as session:
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
NVd3ChartClass = chart_mapping.get(chart.chart_type)
if not NVd3ChartClass:
flash(
"Not supported anymore as the license was incompatible, "
"sorry",
"danger")
redirect('/admin/chart/')
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/nvd3.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
@login_required
@provide_session
def dag_stats(self, session=None):
ds = models.DagStat
ds.update()
qry = (
session.query(ds.dag_id, ds.state, ds.count)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.dag_states:
try:
count = data[dag.dag_id][state]
except Exception:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/task_stats')
@login_required
@provide_session
def task_stats(self, session=None):
TI = models.TaskInstance
DagRun = models.DagRun
Dag = models.DagModel
LastDagRun = (
session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state != State.RUNNING)
.filter(Dag.is_active == True)
.group_by(DagRun.dag_id)
.subquery('last_dag_run')
)
RunningDagRun = (
session.query(DagRun.dag_id, DagRun.execution_date)
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state == State.RUNNING)
.filter(Dag.is_active == True)
.subquery('running_dag_run')
)
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
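# In other words: LastTI collects task instances from the most recent
# non-running run of each active DAG, RunningTI those from any currently
# running run; their union is then aggregated per (dag_id, state).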
LastTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(LastDagRun, and_(
LastDagRun.c.dag_id == TI.dag_id,
LastDagRun.c.execution_date == TI.execution_date))
)
RunningTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(RunningDagRun, and_(
RunningDagRun.c.dag_id == TI.dag_id,
RunningDagRun.c.execution_date == TI.execution_date))
)
UnionTI = union_all(LastTI, RunningTI).alias('union_ti')
qry = (
session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())
.group_by(UnionTI.c.dag_id, UnionTI.c.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.task_states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = dag_id
try:
with open(dag.fileloc, 'r') as f:
code = f.read()
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except IOError as e:
html_code = str(e)
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@expose('/dag_details')
@login_required
@provide_session
def dag_details(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
return self.render(
'airflow/dag_details.html',
dag=dag, title=title, states=states, State=State)
@current_app.errorhandler(404)
def circles(self):
return render_template(
'airflow/circles.html', hostname=socket.getfqdn()), 404
@current_app.errorhandler(500)
def show_traceback(self):
from airflow.utils import asciiart as ascii_
return render_template(
'airflow/traceback.html',
hostname=socket.getfqdn(),
nukular=ascii_.nukular,
info=traceback.format_exc()), 500
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/pickle_info')
@login_required
def pickle_info(self):
d = {}
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
if not dag.is_subdag:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
@expose('/login', methods=['GET', 'POST'])
def login(self):
return airflow.login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
flash('You have been logged out.')
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
@wwwutils.action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title, )
@expose('/log')
@login_required
@wwwutils.action_logging
@provide_session
def log(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
if ti is None:
logs = ["*** Task instance did not exist in the DB\n"]
else:
logger = logging.getLogger('airflow.task')
task_log_reader = conf.get('core', 'task_log_reader')
handler = next((handler for handler in logger.handlers
if handler.name == task_log_reader), None)
try:
ti.task = dag.get_task(ti.task_id)
logs = handler.read(ti)
except AttributeError as e:
logs = ["Task log handler {} does not support read logs.\n{}\n" \
.format(task_log_reader, str(e))]
for i, log in enumerate(logs):
if PY2 and not isinstance(log, unicode):
logs[i] = log.decode('utf-8')
return self.render(
'airflow/ti_log.html',
logs=logs, dag=dag, title="Log by attempts", task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
@wwwutils.action_logging
def task(self):
TI = models.TaskInstance
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = copy.copy(dag.get_task(task_id))
task.resolve_template_files()
ti = TI(task=task, execution_date=dttm)
ti.refresh_from_db()
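        # dir() also yields bound methods; comparing each attribute's type against
        # type(self.task) (itself a bound method) filters those out.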
ti_attrs = []
for attr_name in dir(ti):
if not attr_name.startswith('_'):
attr = getattr(ti, attr_name)
if type(attr) != type(self.task):
ti_attrs.append((attr_name, str(attr)))
task_attrs = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer:
task_attrs.append((attr_name, str(attr)))
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
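        # Fallback message used below when no failed dependency is reported but the
        # task instance is still not running.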
no_failed_deps_result = [(
"Unknown",
dedent("""\
All dependencies are met but the task instance is not running. In most cases this just means that the task will probably be scheduled soon unless:<br/>
- The scheduler is down or under heavy load<br/>
{}
<br/>
If this task instance does not start soon please contact your Airflow administrator for assistance."""
.format(
"- This task instance already ran and had it's state changed manually (e.g. cleared in the UI)<br/>"
if ti.state == State.NONE else "")))]
# Use the scheduler's context to figure out which dependencies are not met
dep_context = DepContext(SCHEDULER_DEPS)
failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
ti.get_failed_dep_statuses(
dep_context=dep_context)]
title = "Task Instance Details"
return self.render(
'airflow/task.html',
task_attrs=task_attrs,
ti_attrs=ti_attrs,
failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/xcom')
@login_required
@wwwutils.action_logging
@provide_session
def xcom(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
dag=dag, title=title)
@expose('/run')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def run(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = pendulum.parse(execution_date)
ignore_all_deps = request.args.get('ignore_all_deps') == "true"
ignore_task_deps = request.args.get('ignore_task_deps') == "true"
ignore_ti_state = request.args.get('ignore_ti_state') == "true"
try:
from airflow.executors import GetDefaultExecutor
from airflow.executors.celery_executor import CeleryExecutor
executor = GetDefaultExecutor()
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
except ImportError:
            # if the CeleryExecutor cannot be imported, it cannot be the active executor either
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be queued
dep_context = DepContext(
deps=QUEUE_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join(
["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
flash("Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error")
return redirect(origin)
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/trigger')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def trigger(self):
dag_id = request.args.get('dag_id')
origin = request.args.get('origin') or "/admin/"
dag = dagbag.get_dag(dag_id)
if not dag:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = timezone.utcnow()
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True
)
flash(
"Triggered {}, "
"it should start any moment now.".format(dag_id))
return redirect(origin)
def _clear_dag_tis(self, dag, start_date, end_date, origin,
recursive=False, confirmed=False):
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=("Here's the list of task instances you are about "
"to clear:"),
details=details)
return response
@expose('/clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
execution_date = request.args.get('execution_date')
execution_date = pendulum.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
recursive = request.args.get('recursive') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=recursive, confirmed=confirmed)
@expose('/dagrun_clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
dag = dagbag.get_dag(dag_id)
execution_date = pendulum.parse(execution_date)
start_date = execution_date
end_date = execution_date
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=True, confirmed=confirmed)
@expose('/blocked')
@login_required
@provide_session
def blocked(self, session=None):
DR = models.DagRun
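        # Count running dag_runs per DAG so the UI can compare them against max_active_runs.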
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
@expose('/dagrun_success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_success(self):
dag_id = request.args.get('dag_id')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == 'true'
origin = request.args.get('origin')
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = pendulum.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state(dag, execution_date, state=State.SUCCESS,
commit=confirmed)
if confirmed:
flash('Marked success on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render('airflow/confirm.html',
message=("Here's the list of task instances you are "
"about to mark as successful:"),
details=details)
return response
@expose('/success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def success(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
task.dag = dag
execution_date = request.args.get('execution_date')
execution_date = pendulum.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
if not dag:
flash("Cannot find DAG: {}".format(dag_id))
return redirect(origin)
if not task:
flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id))
return redirect(origin)
from airflow.api.common.experimental.mark_tasks import set_state
if confirmed:
altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=State.SUCCESS,
commit=True)
flash("Marked success on {} task instances".format(len(altered)))
return redirect(origin)
to_be_altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=State.SUCCESS,
commit=False)
details = "\n".join([str(t) for t in to_be_altered])
response = self.render("airflow/confirm.html",
message=("Here's the list of task instances you are "
"about to mark as successful:"),
details=details)
return response
@expose('/tree')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
@provide_session
def tree(self, session=None):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
DR = models.DagRun
dag_runs = (
session.query(DR)
.filter(
DR.dag_id == dag.dag_id,
DR.execution_date <= base_date,
DR.execution_date >= min_date)
.all()
)
dag_runs = {
dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
dates = sorted(list(dag_runs.keys()))
max_date = max(dates) if dates else None
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
task_instances = {}
for ti in tis:
tid = alchemy_to_dict(ti)
dr = dag_runs.get(ti.execution_date)
tid['external_trigger'] = dr['external_trigger'] if dr else False
task_instances[(ti.task_id, ti.execution_date)] = tid
expanded = []
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / max(1, len(dag.roots))
def recurse_nodes(task, visited):
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
def set_duration(tid):
if (isinstance(tid, dict) and tid.get("state") == State.RUNNING and
tid["start_date"] is not None):
d = timezone.utcnow() - pendulum.parse(tid["start_date"])
tid["duration"] = d.total_seconds()
return tid
return {
'name': task.task_id,
'instances': [
set_duration(task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates],
}
data = json.dumps(data, indent=4, default=json_ser)
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur)
@expose('/graph')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
@provide_session
def graph(self, session=None):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
arrange = request.args.get('arrange', dag.orientation)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
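        # Walk upstream from the roots, recording each edge only once.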
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dttm = request.args.get('execution_date')
if dttm:
dttm = pendulum.parse(dttm)
else:
dttm = dag.latest_execution_date or timezone.utcnow()
DR = models.DagRun
drs = (
session.query(DR)
.filter_by(dag_id=dag_id)
.order_by(desc(DR.execution_date)).all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
class GraphForm(Form):
execution_date = SelectField("DAG run", choices=dr_choices)
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(
data={'execution_date': dttm.isoformat(), 'arrange': arrange})
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') and dag.doc_md else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=state_token(dr_state),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2), )
@expose('/duration')
@login_required
@wwwutils.action_logging
@provide_session
def duration(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
cum_chart = nvd3.lineChart(
name="cumLineChart", x_is_date=True, height=chart_height, width="1200")
y = defaultdict(list)
x = defaultdict(list)
cum_y = defaultdict(list)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
TF = models.TaskFail
ti_fails = (
session
.query(TF)
.filter(
TF.dag_id == dag.dag_id,
TF.execution_date >= min_date,
TF.execution_date <= base_date,
TF.task_id.in_([t.task_id for t in dag.tasks]))
.all()
)
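        # Aggregate recorded failure durations per (dag_id, task_id, execution_date) so the
        # cumulative chart also accounts for time spent on failed attempts.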
fails_totals = defaultdict(int)
for tf in ti_fails:
dict_key = (tf.dag_id, tf.task_id, tf.execution_date)
fails_totals[dict_key] += tf.duration
for ti in tis:
if ti.duration:
dttm = wwwutils.epoch(ti.execution_date)
x[ti.task_id].append(dttm)
y[ti.task_id].append(float(ti.duration))
fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)
fails_total = fails_totals[fails_dict_key]
cum_y[ti.task_id].append(float(ti.duration + fails_total))
# determine the most relevant time unit for the set of task instance
# durations for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])
# update the y Axis on both charts to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '40'
cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(cum_y_unit))
cum_chart.axislist['yAxis']['axisLabelDistance'] = '40'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
cum_chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(cum_y[task.task_id],
cum_y_unit))
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
cum_chart.buildcontent()
s_index = cum_chart.htmlcontent.rfind('});')
cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +
"$(function() {$( document ).trigger('chartload') })" +
cum_chart.htmlcontent[s_index:])
return self.render(
'airflow/duration_chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent,
cum_chart=cum_chart.htmlcontent
)
@expose('/tries')
@login_required
@wwwutils.action_logging
@provide_session
def tries(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height,
width="1200")
for task in dag.tasks:
y = []
x = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
dttm = wwwutils.epoch(ti.execution_date)
x.append(dttm)
y.append(ti.try_number)
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
tries = sorted(list({ti.try_number for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if tries else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent
)
@expose('/landing_times')
@login_required
@wwwutils.action_logging
@provide_session
def landing_times(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
y = {}
x = {}
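        # Landing time: seconds from the schedule boundary following the execution_date
        # (or the execution_date itself for unscheduled DAGs) to the task's end_date.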
for task in dag.tasks:
y[task.task_id] = []
x[task.task_id] = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
ts = ti.execution_date
if dag.schedule_interval and dag.following_schedule(ts):
ts = dag.following_schedule(ts)
if ti.end_date:
dttm = wwwutils.epoch(ti.execution_date)
secs = (ti.end_date - ts).total_seconds()
x[ti.task_id].append(dttm)
y[ti.task_id].append(secs)
# determine the most relevant time unit for the set of landing times
# for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
# update the y Axis to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Landing Time ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '40'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
chart=chart.htmlcontent,
height=str(chart_height + 100) + "px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
)
@expose('/paused', methods=['POST'])
@login_required
@wwwutils.action_logging
@provide_session
def paused(self, session=None):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
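        # Note the inversion: is_paused == 'false' in the query string pauses the DAG
        # (the argument presumably reflects the pre-toggle state).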
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
@wwwutils.action_logging
@provide_session
def refresh(self, session=None):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = timezone.utcnow()
session.merge(orm_dag)
session.commit()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect(request.referrer)
@expose('/refresh_all')
@login_required
@wwwutils.action_logging
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
@wwwutils.action_logging
@provide_session
def gantt(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dttm = request.args.get('execution_date')
if dttm:
dttm = pendulum.parse(dttm)
else:
dttm = dag.latest_execution_date or timezone.utcnow()
form = DateTimeForm(data={'execution_date': dttm})
tis = [
ti for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
tasks = []
for ti in tis:
end_date = ti.end_date if ti.end_date else timezone.utcnow()
tasks.append({
'startDate': wwwutils.epoch(ti.start_date),
'endDate': wwwutils.epoch(end_date),
'isoStart': ti.start_date.isoformat()[:-4],
'isoEnd': end_date.isoformat()[:-4],
'taskName': ti.task_id,
'duration': "{}".format(end_date - ti.start_date)[:-4],
'status': ti.state,
'executionDate': ti.execution_date.isoformat(),
})
states = {ti.state: ti.state for ti in tis}
data = {
'taskNames': [ti.task_id for ti in tis],
'tasks': tasks,
'taskStatus': states,
'height': len(tis) * 25 + 25,
}
session.commit()
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
data=json.dumps(data, indent=2),
base_date='',
demo_mode=demo_mode,
root=root,
)
@expose('/object/task_instances')
@login_required
@wwwutils.action_logging
@provide_session
def task_instances(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = pendulum.parse(dttm)
else:
return ("Error: Invalid execution_date")
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
return json.dumps(task_instances)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
with create_session() as session:
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
        except Exception:
# prevent XSS
form = escape(form)
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
@expose('/varimport', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def varimport(self):
try:
out = str(request.files['file'].read())
d = json.loads(out)
except Exception:
flash("Missing file or syntax error.")
else:
for k, v in d.items():
models.Variable.set(k, v, serialize_json=isinstance(v, dict))
flash("{} variable(s) successfully updated.".format(len(d)))
return redirect('/admin/variable')
class HomeView(AdminIndexView):
@expose("/")
@login_required
@provide_session
def index(self, session=None):
DM = models.DagModel
# restrict the dags shown if filter_by_owner and current user is not superuser
do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()
hide_paused_dags_by_default = conf.getboolean('webserver',
'hide_paused_dags_by_default')
show_paused_arg = request.args.get('showPaused', 'None')
def get_int_arg(value, default=0):
try:
return int(value)
except ValueError:
return default
arg_current_page = request.args.get('page', '0')
arg_search_query = request.args.get('search', None)
dags_per_page = PAGE_SIZE
current_page = get_int_arg(arg_current_page, default=0)
if show_paused_arg.strip().lower() == 'false':
hide_paused = True
elif show_paused_arg.strip().lower() == 'true':
hide_paused = False
else:
hide_paused = hide_paused_dags_by_default
# read orm_dags from the db
sql_query = session.query(DM)
if do_filter and owner_mode == 'ldapgroup':
sql_query = sql_query.filter(
~DM.is_subdag,
DM.is_active,
DM.owners.in_(current_user.ldap_groups)
)
elif do_filter and owner_mode == 'user':
sql_query = sql_query.filter(
~DM.is_subdag, DM.is_active,
DM.owners == current_user.user.username
)
else:
sql_query = sql_query.filter(
~DM.is_subdag, DM.is_active
)
# optionally filter out "paused" dags
if hide_paused:
sql_query = sql_query.filter(~DM.is_paused)
orm_dags = {dag.dag_id: dag for dag
in sql_query
.all()}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
# get a list of all non-subdag dags visible to everyone
# optionally filter out "paused" dags
if hide_paused:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag and not dag.is_paused]
else:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag]
# optionally filter to get only dags that the user should see
if do_filter and owner_mode == 'ldapgroup':
# only show dags owned by someone in @current_user.ldap_groups
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner in current_user.ldap_groups
}
elif do_filter and owner_mode == 'user':
# only show dags owned by @current_user.user.username
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner == current_user.user.username
}
else:
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
}
if arg_search_query:
lower_search_query = arg_search_query.lower()
# filter by dag_id
webserver_dags_filtered = {
dag_id: dag
for dag_id, dag in webserver_dags.items()
if (lower_search_query in dag_id.lower() or
lower_search_query in dag.owner.lower())
}
all_dag_ids = (set([dag.dag_id for dag in orm_dags.values()
if lower_search_query in dag.dag_id.lower() or
lower_search_query in dag.owners.lower()]) |
set(webserver_dags_filtered.keys()))
sorted_dag_ids = sorted(all_dag_ids)
else:
webserver_dags_filtered = webserver_dags
sorted_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))
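        # Paginate the combined list of DAG ids from both the DB and the DagBag.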
start = current_page * dags_per_page
end = start + dags_per_page
num_of_all_dags = len(sorted_dag_ids)
page_dag_ids = sorted_dag_ids[start:end]
num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))
auto_complete_data = set()
for dag in webserver_dags_filtered.values():
auto_complete_data.add(dag.dag_id)
auto_complete_data.add(dag.owner)
for dag in orm_dags.values():
auto_complete_data.add(dag.dag_id)
auto_complete_data.add(dag.owners)
return self.render(
'airflow/dags.html',
webserver_dags=webserver_dags_filtered,
orm_dags=orm_dags,
hide_paused=hide_paused,
current_page=current_page,
search_query=arg_search_query if arg_search_query else '',
page_size=dags_per_page,
num_of_pages=num_of_pages,
num_dag_from=start + 1,
num_dag_to=min(end, num_of_all_dags),
num_of_all_dags=num_of_all_dags,
paging=wwwutils.generate_pages(current_page, num_of_pages,
search=arg_search_query,
showPaused=not hide_paused),
dag_ids_in_page=page_dag_ids,
auto_complete_data=auto_complete_data)
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/', methods=['POST', 'GET'])
@wwwutils.gzipped
@provide_session
def query(self, session=None):
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.form.get('conn_id')
csv = request.form.get('csv') == "true"
sql = request.form.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes=[
'table', 'table-bordered', 'table-striped', 'no-wrap'],
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
column_display_actions = True
page_size = PAGE_SIZE
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
    Modifies the base ModelView class for non-edit, browse-only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
form_args = {
'pool': {
'validators': [
validators.DataRequired(),
]
}
}
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
filter_converter = wwwutils.UtcFilterConverter()
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
@provide_session
def _connection_ids(session=None):
return [
(c.conn_id, c.conn_id)
for c in (
session.query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',
)
column_list = (
'label',
'conn_id',
'chart_type',
'owner',
'last_modified',
)
column_sortable_list = (
'label',
'conn_id',
'chart_type',
('owner', 'owner.username'),
'last_modified',
)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
'x_is_date': (
"Whether the X axis should be casted as a date field. Expect most "
"intelligible date formats to get casted properly."
),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
'A dictionary of {"key": "values",} that define what the '
'templated fields (parameters) values should be by default. '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': _connection_ids()
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if not model.user_id and current_user and hasattr(current_user, 'id'):
model.user_id = current_user.id
model.last_modified = timezone.utcnow()
chart_mapping = (
('line', 'lineChart'),
('spline', 'lineChart'),
('bar', 'multiBarChart'),
('column', 'multiBarChart'),
('area', 'stackedAreaChart'),
('stacked_area', 'stackedAreaChart'),
('percent_area', 'stackedAreaChart'),
('datatable', 'datatable'),
)
chart_mapping = dict(chart_mapping)
class KnownEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description',
)
form_args = {
'label': {
'validators': [
validators.DataRequired(),
],
},
'event_type': {
'validators': [
validators.DataRequired(),
],
},
'start_date': {
'validators': [
validators.DataRequired(),
],
'filters': [
parse_datetime_f,
],
},
'end_date': {
'validators': [
validators.DataRequired(),
GreaterEqualThan(fieldname='start_date'),
],
'filters': [
parse_datetime_f,
]
},
'reported_by': {
'validators': [
validators.DataRequired(),
],
}
}
column_list = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
)
column_default_sort = ("start_date", True)
column_sortable_list = (
'label',
# todo: yes this has a spelling error
('event_type', 'event_type.know_event_type'),
'start_date',
'end_date',
('reported_by', 'reported_by.username'),
)
filter_converter = wwwutils.UtcFilterConverter()
form_overrides = dict(start_date=DateTimeField, end_date=DateTimeField)
class KnownEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
# NOTE: For debugging / troubleshooting
# mv = KnowEventTypeView(
# models.KnownEventType,
# Session, name="Known Event Types", category="Manage")
# admin.add_view(mv)
# class DagPickleView(SuperUserMixin, ModelView):
# pass
# mv = DagPickleView(
# models.DagPickle,
# Session, name="Pickles", category="Manage")
# admin.add_view(mv)
class VariableView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
list_template = 'airflow/variable_list.html'
def hidden_field_formatter(view, context, model, name):
if wwwutils.should_hide_value_for_key(model.key):
return Markup('*' * 8)
try:
return getattr(model, name)
except AirflowException:
return Markup('<span class="label label-danger">Invalid</span>')
form_columns = (
'key',
'val',
)
column_list = ('key', 'val', 'is_encrypted',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val')
column_default_sort = ('key', False)
form_widget_args = {
'is_encrypted': {'disabled': True},
'val': {
'rows': 20,
}
}
form_args = {
'key': {
'validators': {
validators.DataRequired(),
},
},
}
column_sortable_list = (
'key',
'val',
'is_encrypted',
)
column_formatters = {
'val': hidden_field_formatter,
}
# Default flask-admin export functionality doesn't handle serialized json
@action('varexport', 'Export', None)
@provide_session
def action_varexport(self, ids, session=None):
V = models.Variable
qry = session.query(V).filter(V.id.in_(ids)).all()
var_dict = {}
d = json.JSONDecoder()
for var in qry:
val = None
try:
val = d.decode(var.val)
            except Exception:
val = var.val
var_dict[var.key] = val
response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
response.headers["Content-Disposition"] = "attachment; filename=variables.json"
return response
def on_form_prefill(self, form, id):
if wwwutils.should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
class XComView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "XCom"
verbose_name_plural = "XComs"
form_columns = (
'key',
'value',
'execution_date',
'task_id',
'dag_id',
)
form_extra_fields = {
'value': StringField('Value'),
}
form_args = {
'execution_date': {
'filters': [
parse_datetime_f,
]
}
}
column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
filter_converter = wwwutils.UtcFilterConverter()
form_overrides = dict(execution_date=DateTimeField)
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_display_actions = False
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
filter_converter = wwwutils.UtcFilterConverter()
class DagRunModelView(ModelViewOnly):
verbose_name_plural = "DAG Runs"
can_edit = True
can_create = True
column_editable_list = ('state',)
verbose_name = "dag run"
column_default_sort = ('execution_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
form_args = dict(
dag_id=dict(validators=[validators.DataRequired()])
)
column_list = (
'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
column_filters = column_list
filter_converter = wwwutils.UtcFilterConverter()
column_searchable_list = ('dag_id', 'state', 'run_id')
column_formatters = dict(
execution_date=datetime_f,
state=state_f,
start_date=datetime_f,
dag_id=dag_link,
run_id=dag_run_link
)
@action('new_delete', "Delete", "Are you sure you want to delete selected records?")
@provide_session
def action_new_delete(self, ids, session=None):
deleted = set(session.query(models.DagRun)
.filter(models.DagRun.id.in_(ids))
.all())
session.query(models.DagRun) \
.filter(models.DagRun.id.in_(ids)) \
.delete(synchronize_session='fetch')
session.commit()
dirty_ids = []
for row in deleted:
dirty_ids.append(row.dag_id)
models.DagStat.update(dirty_ids, dirty_only=False, session=session)
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_dagrun_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_dagrun_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_dagrun_state(ids, State.SUCCESS)
@provide_session
def set_dagrun_state(self, ids, target_state, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
dirty_ids.append(dr.dag_id)
count += 1
dr.state = target_state
if target_state == State.RUNNING:
dr.start_date = timezone.utcnow()
else:
dr.end_date = timezone.utcnow()
session.commit()
models.DagStat.update(dirty_ids, session=session)
flash(
"{count} dag runs were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_display_actions = False
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date')
filter_converter = wwwutils.UtcFilterConverter()
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date')
filter_converter = wwwutils.UtcFilterConverter()
named_filter_urls = True
column_formatters = dict(
log_url=log_url_formatter,
task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
queued_dttm=datetime_f,
dag_id=dag_link,
run_id=dag_run_link,
duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('job_id', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
column_list = (
'state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
'pool', 'log_url')
page_size = PAGE_SIZE
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_task_instance_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_task_instance_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_task_instance_state(ids, State.SUCCESS)
@action('set_retry', "Set state to 'up_for_retry'", None)
def action_set_retry(self, ids):
self.set_task_instance_state(ids, State.UP_FOR_RETRY)
@provide_session
@action('clear',
lazy_gettext('Clear'),
lazy_gettext(
'Are you sure you want to clear the state of the selected task instance(s)'
' and set their dagruns to the running state?'))
def action_clear(self, ids, session=None):
try:
TI = models.TaskInstance
dag_to_task_details = {}
dag_to_tis = {}
# Collect dags upfront as dagbag.get_dag() will reset the session
for id_str in ids:
task_id, dag_id, execution_date = id_str.split(',')
dag = dagbag.get_dag(dag_id)
task_details = dag_to_task_details.setdefault(dag, [])
task_details.append((task_id, execution_date))
for dag, task_details in dag_to_task_details.items():
for task_id, execution_date in task_details:
execution_date = parse_execution_date(execution_date)
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag.dag_id,
TI.execution_date == execution_date).one()
tis = dag_to_tis.setdefault(dag, [])
tis.append(ti)
for dag, tis in dag_to_tis.items():
models.clear_task_instances(tis, session, dag=dag)
session.commit()
flash("{0} task instances have been cleared".format(len(ids)))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to clear task instances', 'error')
@provide_session
def set_task_instance_state(self, ids, target_state, session=None):
try:
TI = models.TaskInstance
count = len(ids)
for id in ids:
task_id, dag_id, execution_date = id.split(',')
execution_date = parse_execution_date(execution_date)
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).one()
ti.state = target_state
session.commit()
flash(
"{count} task instances were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
def get_one(self, id):
"""
As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one().
TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
https://github.com/flask-admin/flask-admin/issues/1226
"""
task_id, dag_id, execution_date = iterdecode(id)
execution_date = pendulum.parse(execution_date)
return self.session.query(self.model).get((task_id, dag_id, execution_date))
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
'extra__jdbc__drv_path',
'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__keyfile_dict',
'extra__google_cloud_platform__scope',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)
form_overrides = dict(_password=PasswordField, _extra=TextAreaField)
form_widget_args = {
'is_extra_encrypted': {'disabled': True},
'is_encrypted': {'disabled': True},
}
    # Used to customize the form; the form's elements get rendered
    # and the results are stored in the extra field as JSON. All of these
    # need to be prefixed with extra__ and then the conn_type, as in
    # extra__{conn_type}__name. You can also hide form elements and rename
    # others from the connection_form.js file.
form_extra_fields = {
'extra__jdbc__drv_path': StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
'extra__google_cloud_platform__project': StringField('Project Id'),
'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),
'extra__google_cloud_platform__keyfile_dict': PasswordField('Keyfile JSON'),
'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),
}
form_choices = {
'conn_type': models.Connection._types
}
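    # Serialize the typed extra__ fields back into the JSON 'extra' column for
    # connection types that use them.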
def on_model_change(self, form, model, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:
extra = {
key: formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
@classmethod
def alert_fernet_key(cls):
fk = None
try:
fk = conf.get('core', 'fernet_key')
        except Exception:
pass
return fk is None
@classmethod
def is_secure(cls):
"""
Used to display a message in the Connection list view making it clear
that the passwords and `extra` field can't be encrypted.
"""
is_secure = False
try:
import cryptography
conf.get('core', 'fernet_key')
is_secure = True
        except Exception:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
class VersionView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def version(self):
# Look at the version from setup.py
try:
airflow_version = pkg_resources.require("apache-airflow")[0].version
except Exception as e:
airflow_version = None
logging.error(e)
# Get the Git repo and git hash
git_version = None
try:
with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f:
git_version = f.readline()
except Exception as e:
logging.error(e)
# Render information
title = "Version Info"
return self.render('airflow/version.html',
title=title,
airflow_version=airflow_version,
git_version=git_version)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = conf.AIRFLOW_CONFIG
if conf.getboolean("webserver", "expose_config"):
with open(conf.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
table = [(section, key, value, source)
for section, parameters in conf.as_dict(True, True).items()
for key, (value, source) in parameters.items()]
else:
config = (
"# Your Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
table = None
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/config.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle,
table=table)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
filter_converter = wwwutils.UtcFilterConverter()
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = PAGE_SIZE
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
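# Hedged sketch (not part of the original Airflow module): the default-filter
# composition used by DagModelView.get_query above -- keep DAGs that are
# active or paused and drop subdags -- replayed on a toy SQLAlchemy model
# against an in-memory SQLite database. Every name below (ToyDag, toy_dag,
# the sample rows) is illustrative only and the helper is never called.
def _toy_dag_default_filters():
    from sqlalchemy import Column, Boolean, String, create_engine, or_
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.ext.declarative import declarative_base
    Base = declarative_base()
    class ToyDag(Base):
        __tablename__ = 'toy_dag'
        dag_id = Column(String(50), primary_key=True)
        is_active = Column(Boolean, default=True)
        is_paused = Column(Boolean, default=False)
        is_subdag = Column(Boolean, default=False)
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add_all([
        ToyDag(dag_id='live'),
        ToyDag(dag_id='paused', is_active=False, is_paused=True),
        ToyDag(dag_id='sub', is_subdag=True),
    ])
    session.commit()
    query = (session.query(ToyDag)
             .filter(or_(ToyDag.is_active, ToyDag.is_paused))
             .filter(~ToyDag.is_subdag))
    return sorted(d.dag_id for d in query)  # ['live', 'paused']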
| apache-2.0 |
nhejazi/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 24 | 14995 | import numpy as np
from numpy.testing import assert_approx_equal
from sklearn.utils.testing import (assert_equal, assert_array_almost_equal,
assert_array_equal, assert_true,
assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_, CCA
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_random_state
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
    # Compare the two algorithms: NIPALS vs. SVD
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
    # check equality of the loadings (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
pls_bysvd.x_loadings_, decimal=5,
err_msg="nipals and svd implementations lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
pls_bysvd.y_loadings_, decimal=5,
err_msg="nipals and svd implementations lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
# x_weights_sign_flip holds columns of 1 or -1, depending on sign flip
# between R and python
x_weights_sign_flip = pls_ca.x_weights_ / x_weights
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
x_rotations_sign_flip = pls_ca.x_rotations_ / x_rotations
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
y_weights_sign_flip = pls_ca.y_weights_ / y_weights
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
y_rotations_sign_flip = pls_ca.y_rotations_ / y_rotations
# x_weights = X.dot(x_rotation)
# Hence R/python sign flip should be the same in x_weight and x_rotation
assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)
    # This tests that R / python give the same result up to column
# sign indeterminacy
assert_array_almost_equal(np.abs(x_rotations_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)
assert_array_almost_equal(np.abs(y_rotations_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
    # The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
x_weights_sign_flip = pls_2.x_weights_ / x_weights
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
x_loadings_sign_flip = pls_2.x_loadings_ / x_loadings
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
y_weights_sign_flip = pls_2.y_weights_ / y_weights
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
y_loadings_sign_flip = pls_2.y_loadings_ / y_loadings
# x_loadings[:, i] = Xi.dot(x_weights[:, i]) \forall i
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
rng = check_random_state(11)
l1 = rng.normal(size=n)
l2 = rng.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + rng.normal(size=4 * n).reshape((n, 4))
Y = latents + rng.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, rng.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, rng.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
x_weights_sign_flip = pls_ca.x_weights_ / x_weights
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
x_loadings_sign_flip = pls_ca.x_loadings_ / x_loadings
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
y_weights_sign_flip = pls_ca.y_weights_ / y_weights
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
y_loadings_sign_flip = pls_ca.y_loadings_ / y_loadings
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    # Check that PLSSVD does not return all possible components, but just
    # the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
    # check that using copy=False has not destroyed the original inputs;
    # we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale_and_stability():
    # Test the scale=True parameter.
    # This also allows checking numerical stability across platforms.
d = load_linnerud()
X1 = d.data
Y1 = d.target
# causes X[:, -1].std() to be zero
X1[:, -1] = 1.0
# From bug #2821
    # Test with X2, Y2 s.t. clf.x_score[:, 1] == 0, clf.y_score[:, 1] == 0
    # This tests the robustness of the algorithm when dealing with values close to 0
X2 = np.array([[0., 0., 1.],
[1., 0., 0.],
[2., 2., 2.],
[3., 5., 4.]])
Y2 = np.array([[0.1, -0.2],
[0.9, 1.1],
[6.2, 5.9],
[11.9, 12.3]])
for (X, Y) in [(X1, Y1), (X2, Y2)]:
X_std = X.std(axis=0, ddof=1)
X_std[X_std == 0] = 1
Y_std = Y.std(axis=0, ddof=1)
Y_std[Y_std == 0] = 1
X_s = (X - X.mean(axis=0)) / X_std
Y_s = (Y - Y.mean(axis=0)) / Y_std
for clf in [CCA(), pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
X_score, Y_score = clf.fit_transform(X, Y)
clf.set_params(scale=False)
X_s_score, Y_s_score = clf.fit_transform(X_s, Y_s)
assert_array_almost_equal(X_s_score, X_score)
assert_array_almost_equal(Y_s_score, Y_score)
# Scaling should be idempotent
clf.set_params(scale=True)
X_score, Y_score = clf.fit_transform(X_s, Y_s)
assert_array_almost_equal(X_s_score, X_score)
assert_array_almost_equal(Y_s_score, Y_score)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components",
clf.fit, X, Y)
def test_pls_scaling():
# sanity check for scale=True
n_samples = 1000
n_targets = 5
n_features = 10
rng = check_random_state(0)
Q = rng.randn(n_targets, n_features)
Y = rng.randn(n_samples, n_targets)
X = np.dot(Y, Q) + 2 * rng.randn(n_samples, n_features) + 1
X *= 1000
X_scaled = StandardScaler().fit_transform(X)
pls = pls_.PLSRegression(n_components=5, scale=True)
pls.fit(X, Y)
score = pls.score(X, Y)
pls.fit(X_scaled, Y)
score_scaled = pls.score(X_scaled, Y)
assert_approx_equal(score, score_scaled)
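# Hedged illustration (not part of the scikit-learn test suite): the sign-flip
# checks in test_pls above rely on PLS components being defined only up to a
# per-column sign, so dividing two valid solutions element-wise yields columns
# that are entirely +1 or entirely -1. The numbers below are made up; the
# helper is not collected or called by the tests.
def _demo_sign_indeterminacy():
    reference = np.array([[0.6, -0.2], [0.8, 0.4]])
    flipped = reference * np.array([1.0, -1.0])  # flip the sign of column 2
    ratio = flipped / reference
    assert np.allclose(np.abs(ratio), 1)
    assert np.allclose(ratio, [[1., -1.], [1., -1.]])
    return ratio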
| bsd-3-clause |
hongguangguo/shogun | examples/undocumented/python_modular/graphical/classifier_perceptron_graphical.py | 26 | 2311 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import latex_plot_inits
parameter_list = [[20, 5, 1., 1000, 1, None, 5], [100, 5, 1., 1000, 1, None, 10]]
def classifier_perceptron_graphical(n=100, distance=5, learn_rate=1., max_iter=1000, num_threads=1, seed=None, nperceptrons=5):
from modshogun import RealFeatures, BinaryLabels
from modshogun import Perceptron
from modshogun import MSG_INFO
# 2D data
_DIM = 2
# To get the nice message that the perceptron has converged
dummy = BinaryLabels()
dummy.io.set_loglevel(MSG_INFO)
np.random.seed(seed)
# Produce some (probably) linearly separable training data by hand
# Two Gaussians at a far enough distance
X = np.array(np.random.randn(_DIM,n))+distance
Y = np.array(np.random.randn(_DIM,n))
label_train_twoclass = np.hstack((np.ones(n), -np.ones(n)))
fm_train_real = np.hstack((X,Y))
feats_train = RealFeatures(fm_train_real)
labels = BinaryLabels(label_train_twoclass)
perceptron = Perceptron(feats_train, labels)
perceptron.set_learn_rate(learn_rate)
perceptron.set_max_iter(max_iter)
perceptron.set_initialize_hyperplane(False)
# Find limits for visualization
x_min = min(np.min(X[0,:]), np.min(Y[0,:]))
x_max = max(np.max(X[0,:]), np.max(Y[0,:]))
y_min = min(np.min(X[1,:]), np.min(Y[1,:]))
y_max = max(np.max(X[1,:]), np.max(Y[1,:]))
for i in xrange(nperceptrons):
# Initialize randomly weight vector and bias
perceptron.set_w(np.random.random(2))
perceptron.set_bias(np.random.random())
# Run the perceptron algorithm
perceptron.train()
# Construct the hyperplane for visualization
# Equation of the decision boundary is w^T x + b = 0
b = perceptron.get_bias()
w = perceptron.get_w()
hx = np.linspace(x_min-1,x_max+1)
hy = -w[1]/w[0] * hx
plt.plot(hx, -1/w[1]*(w[0]*hx+b))
# Plot the two-class data
plt.scatter(X[0,:], X[1,:], s=40, marker='o', facecolors='none', edgecolors='b')
plt.scatter(Y[0,:], Y[1,:], s=40, marker='s', facecolors='none', edgecolors='r')
# Customize the plot
plt.axis([x_min-1, x_max+1, y_min-1, y_max+1])
plt.title('Rosenblatt\'s Perceptron Algorithm')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
return perceptron
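# Hedged check (not part of the original shogun example): verifies the
# decision-boundary algebra used above -- points with y = -(w[0]*x + b)/w[1]
# satisfy w . x + b = 0. The weight vector and bias are made-up values; the
# helper is not called automatically.
def _check_boundary_algebra():
    w = np.array([0.7, -1.3])
    b = 0.4
    hx = np.linspace(-5, 5, 7)
    hy = -(w[0]*hx + b) / w[1]
    assert np.allclose(w[0]*hx + w[1]*hy + b, 0)
    return True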
if __name__=='__main__':
print('Perceptron graphical')
classifier_perceptron_graphical(*parameter_list[0])
| gpl-3.0 |
mkoledoye/mds_examples | animation.py | 2 | 2636 | from matplotlib import pyplot as plt
from matplotlib import animation
from core.config import Config
from utils import generate_dynamic_nodes
# number of mobile node transitions
NO_TRANS = 100
class NodeAnimation(object):
'''create animation of tag-anchor deployment from a given configuration of parameters'''
def __init__(self, cfg, data, show_trail=False):
self.fig = plt.figure()
self.data = data
self.show_trail = show_trail
self.no_of_anchors = cfg.no_of_anchors
self.no_of_tags = cfg.no_of_tags
def init_plot(self):
real_coords, mds_coords, *others = next(self.data)
self.anchors_scat = plt.scatter(real_coords[:self.no_of_anchors, 0], real_coords[:self.no_of_anchors, 1], color='blue', s=100, lw=1, label='Anchors positions', marker='o')
self.real_scat = plt.scatter(real_coords[self.no_of_anchors:, 0], real_coords[self.no_of_anchors:, 1], color='blue', s=100, lw=1, label='Tag positions', marker='o', facecolors='none')
self.mds_scat = plt.scatter(mds_coords[:, 0], mds_coords[:, 1], color='red', s=150, lw=2, label='Estimated positions', marker='+')
scatterplots = self.real_scat, self.mds_scat
if self.show_trail:
last_n_mds_coords = others[0][2:]
self.last_n_scat = plt.scatter(last_n_mds_coords[:, :, 0], last_n_mds_coords[:, :, 1], label='MDS iterations', color='magenta', s=0.5)
scatterplots += (self.last_n_scat,)
plt.legend(loc='best', scatterpoints=1, fontsize=11)
return scatterplots
def update_plot(self, _):
real_coords, mds_coords, *others = next(self.data)
self.real_scat.set_offsets(real_coords)
self.mds_scat.set_offsets(mds_coords)
scatterplots = self.real_scat, self.mds_scat
if self.show_trail:
last_n_mds_coords = others[0][2:]
self.last_n_scat.set_offsets(last_n_mds_coords)
scatterplots += (self.last_n_scat,)
return scatterplots
def draw_plot(self, save_to_file=False):
anim = animation.FuncAnimation(self.fig, self.update_plot, init_func=self.init_plot, interval=300)
        # Keep a reference to the FuncAnimation and call plt.show() in the same
        # scope; otherwise the animation object can be garbage collected and the
        # figure comes up blank.
axes = plt.gca()
axes.set_xlabel('coordinate x [m]')
axes.set_ylabel('coordinate y [m]')
axes.set_xlim([-5, 35])
axes.set_ylim([-5, 29])
if save_to_file:
anim.save('mds_animation_filtering.mp4', extra_args=['-vcodec', 'libx264'])
plt.show()
if __name__ == '__main__':
cfg = Config(no_of_anchors=4, no_of_tags=7, missingdata=True, sigma=0)
data = generate_dynamic_nodes(cfg, algorithm='_smacof_with_anchors_single', no_of_trans=NO_TRANS, add_noise=True, filter_noise=False)
anim = NodeAnimation(cfg, data, show_trail=False)
anim.draw_plot()
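# Hedged minimal sketch (not part of this project): the update pattern
# NodeAnimation relies on -- FuncAnimation repeatedly calls a function that
# moves existing scatter artists via set_offsets instead of redrawing them.
# Point counts and intervals are arbitrary; nothing here runs on import, call
# _toy_scatter_animation() by hand to see it.
def _toy_scatter_animation(n_points=5):
    import numpy as np
    from matplotlib import pyplot as plt
    from matplotlib import animation
    fig = plt.figure()
    scat = plt.scatter(np.random.rand(n_points), np.random.rand(n_points))
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    def update(_frame):
        scat.set_offsets(np.random.rand(n_points, 2))
        return scat,
    # keep a reference to the animation or it may be garbage collected
    anim = animation.FuncAnimation(fig, update, interval=200)
    plt.show()
    return anim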
| mit |
jseabold/scipy | scipy/signal/spectral.py | 14 | 34751 | """Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import lombscargle
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram']
def periodogram(x, fs=1.0, window=None, nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is an array it will be used
directly as the window. Defaults to None; equivalent to 'boxcar'.
nfft : int, optional
Length of the FFT used. If None the length of `x` will be used.
detrend : str or function or False, optional
Specifies how to detrend `x` prior to computing the spectrum. If
`detrend` is a string, it is passed as the ``type`` argument to
`detrend`. If it is a function, it should return a detrended array.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
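# Hedged illustration (not part of scipy): the axis-wise truncation trick used
# in periodogram above when nfft is shorter than the signal -- build a list of
# slices, swap in a shorter slice for the target axis, then index with it.
# The array shape and nfft value are arbitrary; the helper is never called by
# the module.
def _demo_axis_truncation(axis=-1, nfft=3):
    x = np.arange(12).reshape(3, 4)
    s = [np.s_[:]] * len(x.shape)
    s[axis] = np.s_[:nfft]
    # indexing with tuple(s) keeps only the first nfft samples along `axis`
    return x[tuple(s)]  # shape (3, 3)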
def welch(x, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral density
by dividing the data into overlapping segments, computing a modified
periodogram for each segment and averaging the periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
    overlap of 50% is a reasonable trade off between accurately estimating
    the signal power and not over-counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
freqs, Pxx = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend,
return_onesided, scaling, axis)
return freqs, Pxx.real
def csd(x, y, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate the cross power spectral density, Pxy, using Welch's method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the CSD is computed for both inputs; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
--------
By convention, Pxy is computed with the conjugate FFT of X multiplied by
the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
    overlap of 50% is a reasonable trade off between accurately estimating
    the signal power and not over-counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
Pxy = Pxy.mean(axis=-1)
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
def spectrogram(x, fs=1.0, window=('tukey',.25), nperseg=256, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 8``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds to the
segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. In contrast to welch's method, where the entire
data stream is averaged over, one may wish to use a smaller overlap (or
perhaps none at all) when computing a spectrogram, to maintain some
statistical independence between individual segments.
.. versionadded:: 0.16.0
References
----------
    .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck "Discrete-Time
Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency linearly changes
with time from 1kHz to 2kHz, corrupted by 0.001 V**2/Hz of white noise
sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> freq = np.linspace(1e3, 2e3, N)
>>> x = amp * np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx)
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
    # Less overlap than in welch, so samples are more statistically independent
if noverlap is None:
noverlap = nperseg // 8
freqs, time, Pxy = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided, scaling,
axis, mode='psd')
return freqs, time, Pxy
def coherence(x, y, fs=1.0, window='hanning', nperseg=256, noverlap=None,
nfft=None, detrend='constant', axis=-1):
"""
Estimate the magnitude squared coherence estimate, Cxy, of discrete-time
signals X and Y using Welch's method.
Cxy = abs(Pxy)**2/(Pxx*Pyy), where Pxx and Pyy are power spectral density
estimates of X and Y, and Pxy is the cross spectral density estimate of X
and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
--------
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
    overlap of 50% is a reasonable trade off between accurately estimating
    the signal power and not over-counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of Signals"
Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs, window, nperseg, noverlap, nfft, detrend,
axis=axis)
_, Pyy = welch(y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
_, Pxy = csd(x, y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
def _spectral_helper(x, y, fs=1.0, window='hanning', nperseg=256,
noverlap=None, nfft=None, detrend='constant',
return_onesided=True, scaling='spectrum', axis=-1,
mode='psd'):
'''
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between the
psd, csd, and spectrogram functions. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Parameters
    ----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
        the same object in memory as x (i.e. _spectral_helper(x, x, ...)),
the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are ['psd',
'complex', 'magnitude', 'angle', 'phase'].
Returns
-------
freqs : ndarray
Array of sample frequencies.
result : ndarray
        Array of output data, contents dependent on *mode* kwarg.
t : ndarray
Array of times corresponding to each data segment
References
----------
stackoverflow: Rolling window for 1D arrays in Numpy?
<http://stackoverflow.com/a/6811241>
stackoverflow: Using strides for an efficient moving average filter
<http://stackoverflow.com/a/4947453>
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
'''
if mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
raise ValueError("Unknown value for mode %s, must be one of: "
"'default', 'psd', 'complex', "
"'magnitude', 'angle', 'phase'" % mode)
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is not 'psd'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x,y,np.complex64)
else:
outdtype = np.result_type(x,np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
    # Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
# X and Y are same length now, can test nperseg with either
if x.shape[-1] < nperseg:
        warnings.warn('nperseg = {0:d} is greater than input length = {1:d}, '
                      'using nperseg = {1:d}'.format(nperseg, x.shape[-1]))
nperseg = x.shape[-1]
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
else:
noverlap = int(noverlap)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
if np.result_type(win,np.complex64) != outdtype:
win = win.astype(outdtype)
if mode == 'psd':
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
else:
scale = 1
if return_onesided is True:
if np.iscomplexobj(x):
sides = 'twosided'
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
else:
sides = 'twosided'
if sides == 'twosided':
num_freqs = nfft
elif sides == 'onesided':
if nfft % 2:
num_freqs = (nfft + 1)//2
else:
num_freqs = nfft//2 + 1
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft)
result = result[..., :num_freqs]
freqs = fftpack.fftfreq(nfft, 1/fs)[:num_freqs]
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft)
result_y = result_y[..., :num_freqs]
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
elif mode == 'magnitude':
result = np.absolute(result)
elif mode == 'angle' or mode == 'phase':
result = np.angle(result)
elif mode == 'complex':
pass
result *= scale
if sides == 'onesided':
if nfft % 2:
result[...,1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[...,1:-1] *= 2
t = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1, nperseg - noverlap)/float(fs)
if sides != 'twosided' and not nfft % 2:
# get the last value correctly, it is negative otherwise
freqs[-1] *= -1
# we unwrap the phase here to handle the onesided vs. twosided case
if mode == 'phase':
result = np.unwrap(result, axis=-1)
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'complex':
result = result.real
# Output is going to have new last axis for window index
if axis != -1:
# Specify as positive axis index
if axis < 0:
axis = len(result.shape)-1-axis
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
else:
# Make sure window/time index is last axis
result = np.rollaxis(result, -1, -2)
return freqs, t, result
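# Hedged check (not part of scipy): the one-sided frequency count chosen in
# _spectral_helper above matches the length of numpy's real-FFT frequency grid
# for both odd and even nfft. The nfft values are arbitrary; the helper is not
# called anywhere in the module.
def _check_onesided_freq_count():
    for nfft in (8, 9):
        num_freqs = (nfft + 1) // 2 if nfft % 2 else nfft // 2 + 1
        assert num_freqs == len(np.fft.rfftfreq(nfft))
    return True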
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft):
'''
Calculate windowed FFT, for internal use by scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
    _spectral_helper. All input validation is performed there, and the data
axis is assumed to be the last axis of x. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Returns
-------
result : ndarray
Array of FFT data
References
----------
stackoverflow: Repeat NumPy array without replicating data?
<http://stackoverflow.com/a/5568169>
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
'''
    # Create a strided view of overlapping data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
result = fftpack.fft(result, n=nfft)
return result
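# Hedged illustration (not part of scipy): the strided segmentation built in
# _fft_helper above, shown on a tiny array. With nperseg=4 and noverlap=2 the
# step is 2, so consecutive windows share half their samples and no data is
# copied. Values are arbitrary; the helper is never called by the module.
def _demo_strided_segments():
    x = np.arange(10)
    nperseg, noverlap = 4, 2
    step = nperseg - noverlap
    shape = x.shape[:-1] + ((x.shape[-1] - noverlap) // step, nperseg)
    strides = x.strides[:-1] + (step * x.strides[-1], x.strides[-1])
    segments = np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
    # [[0 1 2 3]
    #  [2 3 4 5]
    #  [4 5 6 7]
    #  [6 7 8 9]]
    return segments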
| bsd-3-clause |
akrherz/iem | htdocs/plotting/auto/scripts/p37.py | 1 | 7055 | """MOS plot"""
import datetime
import psycopg2.extras
import numpy as np
import pandas as pd
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
PDICT = {
"NAM": "NAM (9 Dec 2008 - current)",
"GFS": "GFS (16 Dec 2003 - current)",
"ETA": "ETA (24 Feb 2002 - 9 Dec 2008)",
"AVN": "AVN (1 Jun 2000 - 16 Dec 2003)",
}
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc["data"] = True
desc["cache"] = 86400
desc[
"description"
] = """This chart displays the combination of
Model Output Statistics (MOS) forecasts and actual observations
by the automated station the MOS forecast is for. MOS is forecasting
    high and low temperatures over 12-hour periods, so these values are not
actual calendar day high and low temperatures."""
today = datetime.date.today()
desc["arguments"] = [
dict(
type="zstation",
name="zstation",
default="DSM",
label="Select Station:",
network="IA_ASOS",
),
dict(
type="month",
name="month",
label="Select Month:",
default=today.month,
),
dict(
type="year",
name="year",
label="Select Year:",
default=today.year,
min=2000,
),
dict(
type="select",
name="model",
default="NAM",
label="Select MOS Model:",
options=PDICT,
),
]
return desc
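# Hedged sketch (not part of the original autoplot script): the conditional
# aggregation done in SQL inside plotter() below -- min/max of the forecast
# value n_x split by the 12z and 0z forecast hours -- expressed with pandas on
# a made-up frame. Column names and numbers are illustrative only; the helper
# is never called.
def _toy_conditional_aggregation():
    toy = pd.DataFrame({
        "date": ["2018-01-01"] * 4 + ["2018-01-02"] * 4,
        "hour_utc": [12, 12, 0, 0, 12, 12, 0, 0],
        "n_x": [10, 14, 28, 31, 8, 12, 25, 27],
    })
    morning = toy[toy["hour_utc"] == 12].groupby("date")["n_x"].agg(["min", "max"])
    afternoon = toy[toy["hour_utc"] == 0].groupby("date")["n_x"].agg(["min", "max"])
    return morning, afternoon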
def plotter(fdict):
""" Go """
asos_pgconn = get_dbconn("asos")
acursor = asos_pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
mos_pgconn = get_dbconn("mos")
mcursor = mos_pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
ctx = get_autoplot_context(fdict, get_description())
station = ctx["zstation"]
year = ctx["year"]
month = ctx["month"]
model = ctx["model"]
# Extract the range of forecasts for each day for approximately
# the given month
month1 = datetime.datetime(year, month, 1)
sts = month1 - datetime.timedelta(days=7)
ets = month1 + datetime.timedelta(days=32)
mcursor.execute(
"""
SELECT date(ftime),
min(case when
extract(hour from ftime at time zone 'UTC') = 12
then n_x else null end) as min_morning,
max(case when
extract(hour from ftime at time zone 'UTC') = 12
then n_x else null end) as max_morning,
min(case when
extract(hour from ftime at time zone 'UTC') = 0
then n_x else null end) as min_afternoon,
max(case when
extract(hour from ftime at time zone 'UTC') = 0
then n_x else null end) as max_afternoon
from alldata WHERE station = %s and runtime BETWEEN %s and %s
and model = %s GROUP by date
""",
("K" + station, sts, ets, model),
)
mosdata = {}
for row in mcursor:
mosdata[row[0]] = {
"morning": [
row[1] if row[1] is not None else np.nan,
row[2] if row[2] is not None else np.nan,
],
"afternoon": [
row[3] if row[3] is not None else np.nan,
row[4] if row[4] is not None else np.nan,
],
}
    # Go and figure out what the observations were for this month, tricky!
acursor.execute(
"""
SELECT date(valid),
min(case when extract(hour from valid at time zone 'UTC') between 0 and 12
then tmpf else null end) as morning_min,
max(case when extract(hour from valid at time zone 'UTC') between 12 and 24
then tmpf else null end) as afternoon_max
from alldata WHERE station = %s and valid between %s and %s
GROUP by date
""",
(station, sts, ets),
)
obs = {}
for row in acursor:
obs[row[0]] = {
"min": row[1] if row[1] is not None else np.nan,
"max": row[2] if row[2] is not None else np.nan,
}
htop = []
hbottom = []
ltop = []
lbottom = []
hobs = []
lobs = []
now = month1.date()
days = []
rows = []
while now.month == month:
days.append(now.day)
lbottom.append(
mosdata.get(now, {}).get("morning", [np.nan, np.nan])[0]
)
ltop.append(mosdata.get(now, {}).get("morning", [np.nan, np.nan])[1])
hbottom.append(
mosdata.get(now, {}).get("afternoon", [np.nan, np.nan])[0]
)
htop.append(mosdata.get(now, {}).get("afternoon", [np.nan, np.nan])[1])
hobs.append(obs.get(now, {}).get("max", np.nan))
lobs.append(obs.get(now, {}).get("min", np.nan))
rows.append(
dict(
day=now,
low_min=lbottom[-1],
low_max=ltop[-1],
high_min=hbottom[-1],
high_max=htop[-1],
high=hobs[-1],
low=lobs[-1],
)
)
now += datetime.timedelta(days=1)
df = pd.DataFrame(rows)
days = np.array(days)
hbottom = np.ma.fix_invalid(hbottom)
hobs = np.ma.fix_invalid(hobs)
lobs = np.ma.fix_invalid(lobs)
(fig, ax) = plt.subplots(1, 1, figsize=(8, 6))
ax.set_title(
"[%s] %s Daily Temperatures\n%s Forecast MOS Range for %s"
% (
station,
ctx["_nt"].sts[station]["name"],
model,
month1.strftime("%B %Y"),
)
)
arr = (df["high_max"] - df["high_min"]).values
ax.bar(
days + 0.1,
arr,
facecolor="pink",
width=0.7,
bottom=hbottom,
zorder=1,
alpha=0.5,
label="Daytime High",
align="center",
)
arr = (df["low_max"] - df["low_min"]).values
ax.bar(
days - 0.1,
arr,
facecolor="blue",
width=0.7,
bottom=df["low_min"].values,
zorder=1,
alpha=0.3,
label="Morning Low",
align="center",
)
ax.scatter(days + 0.1, hobs, zorder=2, s=40, c="red", label="Actual High")
ax.scatter(days - 0.1, lobs, zorder=2, s=40, c="blue", label="Actual Low")
ax.set_ylabel(r"Temperature $^{\circ}\mathrm{F}$")
ax.grid()
next1 = ets.replace(day=1)
days = (next1 - month1).days
ax.set_xlim(0, days + 0.5)
ax.set_xticks(range(1, days + 1, 2))
ax.set_xlabel("Day of %s" % (month1.strftime("%B %Y")))
# Shrink current axis's height by 10% on the bottom
box = ax.get_position()
ax.set_position(
[box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9]
)
# Put a legend below current axis
ax.legend(
loc="upper center",
bbox_to_anchor=(0.5, -0.1),
fancybox=True,
shadow=True,
ncol=4,
scatterpoints=1,
fontsize=12,
)
return fig, df
if __name__ == "__main__":
plotter(dict(network="TX_ASOS", zstation="AUS", month=1, year=2018))
| mit |
djgagne/scikit-learn | sklearn/tests/test_cross_validation.py | 29 | 46740 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot have more than 2 dimensions')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of the train and test splits covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
# Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
# though not all the classes are necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not an integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not an integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than the non-shuffling
# variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test the StratifiedShuffleSplit, indices are drawn with an
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by KFold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1./3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
# check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (classes are balanced so f1_score should be equal to the
# zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
# test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
MDAnalysis/mdanalysis | package/MDAnalysis/analysis/polymer.py | 1 | 12047 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Polymer analysis --- :mod:`MDAnalysis.analysis.polymer`
=======================================================
:Author: Richard J. Gowers
:Year: 2015, 2018
:Copyright: GNU Public License v3
This module contains various commonly used tools in analysing polymers.
"""
import numpy as np
import scipy.optimize
import warnings
import logging
from .. import NoDataError
from ..core.groups import requires, AtomGroup
from ..lib.distances import calc_bonds
from .base import AnalysisBase
logger = logging.getLogger(__name__)
@requires('bonds')
def sort_backbone(backbone):
"""Rearrange a linear AtomGroup into backbone order
Requires that the backbone has bond information,
and that only backbone atoms are provided (i.e. no side
chains or hydrogens).
Parameters
----------
backbone : AtomGroup
the backbone atoms, not necessarily in order
Returns
-------
sorted_backbone : AtomGroup
backbone in order, so `sorted_backbone[i]` is bonded to
`sorted_backbone[i - 1]` and `sorted_backbone[i + 1]`
.. versionadded:: 0.20.0
"""
if not backbone.n_fragments == 1:
raise ValueError("{} fragments found in backbone. "
"backbone must be a single contiguous AtomGroup"
"".format(backbone.n_fragments))
branches = [at for at in backbone
if len(at.bonded_atoms & backbone) > 2]
if branches:
# find which atom has too many bonds for easier debug
raise ValueError(
"Backbone is not linear. "
"The following atoms have more than two bonds in backbone: {}."
"".format(','.join(str(a) for a in branches)))
caps = [atom for atom in backbone
if len(atom.bonded_atoms & backbone) == 1]
if not caps:
# cyclical structure
raise ValueError("Could not find starting point of backbone, "
"is the backbone cyclical?")
# arbitrarily choose one of the capping atoms to be the startpoint
sorted_backbone = AtomGroup([caps[0]])
# iterate until the sorted chain length matches the backbone size
while len(sorted_backbone) < len(backbone):
# current end of the chain
end_atom = sorted_backbone[-1]
# look at all bonded atoms which are also part of the backbone
# and subtract any that have already been added
next_atom = (end_atom.bonded_atoms & backbone) - sorted_backbone
# append this to the sorted backbone
sorted_backbone += next_atom
return sorted_backbone
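# A minimal usage sketch (illustrative only; ``u`` is an assumed Universe
# loaded with bond information and the selection string is hypothetical):
#
#   bb = u.select_atoms('name N CA C and segid A')
#   bb_sorted = sort_backbone(bb)   # atoms now follow the bonded order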
class PersistenceLength(AnalysisBase):
r"""Calculate the persistence length for polymer chains
The persistence length is the length at which two points on the polymer
chain become decorrelated. This is determined by first measuring the
autocorrelation (:math:`C(n)`) of two bond vectors
(:math:`\mathbf{a}_i, \mathbf{a}_{i + n}`) separated by :math:`n` bonds
.. math::
C(n) = \langle \cos\theta_{i, i+n} \rangle =
\langle \mathbf{a_i} \cdot \mathbf{a_{i+n}} \rangle
An exponential decay is then fitted to this, which yields the
persistence length
.. math::
C(n) \approx \exp\left( - \frac{n \bar{l_B}}{l_P} \right)
where :math:`\bar{l_B}` is the average bond length, and :math:`l_P` is
the persistence length which is fitted
Parameters
----------
atomgroups : iterable
List of AtomGroups. Each should represent a single
polymer chain, ordered in the correct order.
verbose : bool, optional
Show detailed progress of the calculation if set to ``True``.
Attributes
----------
results.bond_autocorrelation : numpy.ndarray
the measured bond autocorrelation
results.lb : float
the average bond length
.. versionadded:: 2.0.0
lb : float
Alias to the :attr:`results.lb`.
.. deprecated:: 2.0.0
Will be removed in MDAnalysis 3.0.0. Please use
:attr:`results.lb` instead.
results.x : numpy.ndarray
length of the decorrelation predicted by *lp*
.. versionadded:: 2.0.0
results.lp : float
calculated persistence length
.. versionadded:: 2.0.0
lp : float
Alias to the :attr:`results.lp`.
.. deprecated:: 2.0.0
Will be removed in MDAnalysis 3.0.0. Please use
:attr:`results.lp` instead.
results.fit : numpy.ndarray
the modelled backbone decorrelation predicted by *lp*
.. versionadded:: 2.0.0
fit : numpy.ndarray
Alias to the :attr:`results.fit`.
.. deprecated:: 2.0.0
Will be removed in MDAnalysis 3.0.0. Please use
:attr:`results.fit` instead.
See Also
--------
:func:`sort_backbone`
for producing the sorted AtomGroup required for input.
Example
-------
.. code-block:: python
from MDAnalysis.tests.datafiles import TRZ_psf, TRZ
import MDAnalysis as mda
from MDAnalysis.analysis import polymer
u = mda.Universe(TRZ_psf, TRZ)
# this system is a pure polymer melt of polyamide,
# so we can select the chains by using the .fragments attribute
chains = u.atoms.fragments
# select only the backbone atoms for each chain
backbones = [chain.select_atoms('not name O* H*') for chain in chains]
# sort the chains, removing any non-backbone atoms
sorted_backbones = [polymer.sort_backbone(bb) for bb in backbones]
persistence_length = polymer.PersistenceLength(sorted_backbones)
# Run the analysis, this will average over all polymer chains
# and all timesteps in trajectory
persistence_length = persistence_length.run()
print(f'The persistence length is: {persistence_length.results.lp}')
# always check the visualisation of this:
persistence_length.plot()
.. versionadded:: 0.13.0
.. versionchanged:: 0.20.0
The run method now automatically performs the exponential fit
.. versionchanged:: 1.0.0
Deprecated :meth:`PersistenceLength.perform_fit` has now been removed.
.. versionchanged:: 2.0.0
Former ``results`` are now stored as ``results.bond_autocorrelation``.
:attr:`lb`, :attr:`lp`, :attr:`fit` are now stored in a
:class:`MDAnalysis.analysis.base.Results` instance.
"""
def __init__(self, atomgroups, **kwargs):
super(PersistenceLength, self).__init__(
atomgroups[0].universe.trajectory, **kwargs)
self._atomgroups = atomgroups
# Check that all chains are the same length
lens = [len(ag) for ag in atomgroups]
chainlength = len(atomgroups[0])
if not all(l == chainlength for l in lens):
raise ValueError("Not all AtomGroups were the same size")
self._results = np.zeros(chainlength - 1, dtype=np.float32)
def _single_frame(self):
# could optimise this by writing a "self dot array"
# we're only using the upper triangle of np.inner
# function would accept a bunch of coordinates and spit out the
# decorrel for that
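# Descriptive note: inner_pr[i, j] below is the cosine of the angle between
# (normalised) bond vectors i and j, so the slice accumulation adds, for each
# separation n, the sum of cos(theta_{i,i+n}); _conclude() later divides by
# the number of contributions per separation to obtain C(n).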
n = len(self._atomgroups[0])
for chain in self._atomgroups:
# Vector from each atom to next
vecs = chain.positions[1:] - chain.positions[:-1]
# Normalised to unit vectors
vecs /= np.sqrt((vecs * vecs).sum(axis=1))[:, None]
inner_pr = np.inner(vecs, vecs)
for i in range(n-1):
self._results[:(n-1)-i] += inner_pr[i, i:]
@property
def lb(self):
wmsg = ("The `lb` attribute was deprecated in "
"MDAnalysis 2.0.0 and will be removed in MDAnalysis 3.0.0. "
"Please use `results.variance` instead.")
warnings.warn(wmsg, DeprecationWarning)
return self.results.lb
@property
def lp(self):
wmsg = ("The `lp` attribute was deprecated in "
"MDAnalysis 2.0.0 and will be removed in MDAnalysis 3.0.0. "
"Please use `results.variance` instead.")
warnings.warn(wmsg, DeprecationWarning)
return self.results.lp
@property
def fit(self):
wmsg = ("The `fit` attribute was deprecated in "
"MDAnalysis 2.0.0 and will be removed in MDAnalysis 3.0.0. "
"Please use `results.variance` instead.")
warnings.warn(wmsg, DeprecationWarning)
return self.results.fit
def _conclude(self):
n = len(self._atomgroups[0])
norm = np.linspace(n - 1, 1, n - 1)
norm *= len(self._atomgroups) * self.n_frames
self.results.bond_autocorrelation = self._results / norm
self._calc_bond_length()
self._perform_fit()
def _calc_bond_length(self):
"""calculate average bond length"""
bs = []
for ag in self._atomgroups:
pos = ag.positions
b = calc_bonds(pos[:-1], pos[1:]).mean()
bs.append(b)
self.results.lb = np.mean(bs)
def _perform_fit(self):
"""Fit the results to an exponential decay"""
try:
self.results.bond_autocorrelation
except AttributeError:
raise NoDataError("Use the run method first") from None
self.results.x = self.results.lb *\
np.arange(len(self.results.bond_autocorrelation))
self.results.lp = fit_exponential_decay(self.results.x,
self.results.bond_autocorrelation)
self.results.fit = np.exp(-self.results.x/self.results.lp)
def plot(self, ax=None):
"""Visualise the results and fit
Parameters
----------
ax : matplotlib.Axes, optional
if provided, the graph is plotted on this axis
Returns
-------
ax : the axis that the graph was plotted on
"""
import matplotlib.pyplot as plt
if ax is None:
fig, ax = plt.subplots()
ax.plot(self.results.x,
self.results.bond_autocorrelation,
'ro',
label='Result')
ax.plot(self.results.x,
self.results.fit,
label='Fit')
ax.set_xlabel(r'x')
ax.set_ylabel(r'$C(x)$')
ax.set_xlim(0.0, 40 * self.results.lb)
ax.legend(loc='best')
return ax
def fit_exponential_decay(x, y):
r"""Fit a function to an exponential decay
.. math:: y = \exp\left(- \frac{x}{a}\right)
Parameters
----------
x, y : array_like
The two arrays of data
Returns
-------
a : float
The coefficient *a* for this decay
Notes
-----
This function assumes that data starts at 1.0 and decays to 0.0
"""
def expfunc(x, a):
return np.exp(-x/a)
a = scipy.optimize.curve_fit(expfunc, x, y)[0][0]
return a
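# A minimal illustrative sketch (not part of the original module): with
# noise-free synthetic data the fitted constant should match the one used to
# generate it. The variable names are hypothetical.
#
#   x = np.linspace(0.0, 50.0, 200)
#   y = np.exp(-x / 7.5)
#   fit_exponential_decay(x, y)   # approximately 7.5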
| gpl-2.0 |
HolgerPeters/scikit-learn | sklearn/linear_model/randomized_l1.py | 4 | 24781 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import _preprocess_data
from ..base import BaseEstimator
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..feature_selection.base import SelectorMixin
from ..utils import (as_float_array, check_random_state, check_X_y, safe_mask)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
from ..exceptions import ConvergenceWarning
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.randint(
0, 2, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
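# Editorial note: each ``active_set`` yielded above is a boolean indicator of
# the features selected by one randomized, subsampled fit, so the returned
# ``scores_`` is simply the per-feature selection frequency over the
# ``n_resampling`` runs (the stability-selection score).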
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
SelectorMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_preprocess_data = staticmethod(_preprocess_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
ensure_min_samples=2, estimator=self)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_offset, y_offset, X_scale = \
self._preprocess_data(X, y, self.fit_intercept, self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def _get_support_mask(self):
"""Get the boolean mask indicating which features are selected.
Returns
-------
support : boolean array of shape [# input features]
An element is True iff its corresponding feature is selected
for retention.
"""
check_is_fitted(self, 'scores_')
return self.scores_ > self.selection_threshold
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float64))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by subsampling the training data and
computing a Lasso estimate where the penalty of a random subset of
coefficients has been scaled. By performing this double
randomization several times, the method assigns high scores to
features that are repeatedly selected across randomizations. This
is known as stability selection. In short, features selected more
often are considered good features.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha used in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article, which corresponds to ``scaling`` here.
scaling : float, optional
The s parameter used to randomly scale the penalty of different
features (See :ref:`User Guide <randomized_l1>` for details ).
Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learned more robust and almost independent of
the number of samples. The same property is not valid for
standardized data. However, if you wish to standardize, please
use `preprocessing.StandardScaler` before calling `fit` on an
estimator with `normalize=False`.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, Lasso, ElasticNet
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory

    def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if isinstance(alpha, six.string_types) and alpha in ('aic', 'bic'):
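            # Estimate a single alpha on the full data with LassoLarsIC
            # (AIC/BIC criterion) and reuse it for every randomized
            # subsample below.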
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)


###############################################################################
# Randomized logistic: classification settings

def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
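    # Keep only the rows selected by ``mask`` and rescale the columns by the
    # random weights, so that for a fixed C the effective l1 penalty differs
    # across features (the randomization step of stability selection).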
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float64))
if C.ndim > 1:
raise ValueError("C should be 1-dimensional array-like, "
"but got a {}-dimensional array-like instead: {}."
.format(C.ndim, C))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores


class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by subsampling the training
    data and fitting an L1-penalized LogisticRegression model where the
penalty of a random subset of coefficients has been scaled. By
performing this double randomization several times, the method
assigns high scores to features that are repeatedly selected across
randomizations. This is known as stability selection. In short,
features selected more often are considered good features.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
    C : float or array-like of shape [n_reg_parameter], optional, default=1
        The regularization parameter C in the LogisticRegression.
        When C is an array, each regularization value in C is fitted in
        turn and the results for each one are stored in ``all_scores_``,
        where rows correspond to features and columns to the values in C.
    scaling : float, optional, default=0.5
        The s parameter used to randomly scale the penalty of different
        features (see :ref:`User Guide <randomized_l1>` for details).
        Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
    fit_intercept : boolean, optional, default=True
        Whether to calculate the intercept for this model. If set
        to False, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    verbose : boolean or integer, optional
        Sets the verbosity amount.
    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        Note that normalizing the regressors makes the learned
        hyperparameters more robust and almost independent of the number
        of samples; the same property does not hold for standardized data.
        If you wish to standardize instead, use
        `preprocessing.StandardScaler` before calling `fit` on an
        estimator with `normalize=False`.
    tol : float, optional, default=1e-3
        Tolerance for the stopping criterion of LogisticRegression.
    n_jobs : integer, optional
        Number of CPUs to use during the resampling. If -1, all CPUs
        are used.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
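
    A minimal, illustrative fit on synthetic binary-classification data
    (the data below is made up for demonstration and only relies on the
    ``scores_`` attribute documented above):

    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(60, 5)
    >>> y = (X[:, 0] + X[:, 1] > 0).astype(int)
    >>> randomized_logistic = RandomizedLogisticRegression(
    ...     random_state=0).fit(X, y)
    >>> feature_scores = randomized_logistic.scores_  # selection frequencies
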
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, LogisticRegression
"""

    def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory

    def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params

    def _preprocess_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize=normalize)
return X, y, X_offset, y, X_scale


###############################################################################
# Stability paths

def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs


def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stability path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
    X : array-like, shape = [n_samples, n_features]
        Training data.
    y : array-like, shape = [n_samples]
        Target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
    eps : float, optional
        Smallest value of alpha / alpha_max considered.
    n_jobs : integer, optional
        Number of CPUs to use during the resampling. If -1, all CPUs
        are used.
    verbose : boolean or integer, optional
        Sets the verbosity amount.
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
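
    Examples
    --------
    A minimal, illustrative call on synthetic data (the data below is made
    up for demonstration only):

    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(50, 10)
    >>> y = X[:, 0] + 0.1 * rng.randn(50)
    >>> alphas_grid, scores_path = lasso_stability_path(X, y, random_state=0)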
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.randint(0, 2, size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
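    # For each resampled path, add a 0/1 indicator of whether each feature is
    # active at every grid point, using nearest-neighbour interpolation of the
    # coefficients along the rescaled alpha axis.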
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
| bsd-3-clause |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sklearn/utils/tests/test_weighted_mode.py | 3 | 1095 | import numpy as np
from nose.tools import assert_true
from sklearn.utils.extmath import weighted_mode
from scipy import stats
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_true(np.all(mode == mode_result))
assert_true(np.all(score.ravel() == w[:, :5].sum(1)))
if __name__ == '__main__':
import nose
nose.runmodule()
| agpl-3.0 |
pratapvardhan/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
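# Applying a linear transformation stretches and rotates the isotropic blobs,
# which violates the implicit k-means assumption of spherical clusters.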
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |